mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-25 20:59:28 +01:00

Compare commits


474 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
59834cf8a7 Merge pull request #936 from pravarag/update-helm-chart-v1.25
Update helm chart version to v1.25.0
2022-09-14 09:03:00 -07:00
Kubernetes Prow Robot
82ed18fd2b Merge pull request #947 from eminaktas/metric-label-fix
feat: change DeschedulerVersion and GitVersion labels
2022-09-12 12:23:05 -07:00
eminaktas
2c17af79f4 feat: change DeschedulerVersion and GitVersion labels
This commit changes the build_info metric labels:
- AppVersion label will show the major+minor version,
  for example 0.24.1
- GitVersion label will show the minor version numbers and commit hash

Signed-off-by: eminaktas <eminaktas34@gmail.com>
2022-09-12 21:36:40 +03:00
Kubernetes Prow Robot
72bf50fde6 Merge pull request #929 from knelasevero/ev-filter-plugin
Add new DefaultEvictor plugin with args
2022-09-12 09:21:24 -07:00
Lucas Severo Alves
f47c2c4407 add new pre-eviction filter plugin with args 2022-09-12 16:56:21 +02:00
Kubernetes Prow Robot
16619fcf44 Merge pull request #931 from a7i/amir/v1beta1
remove TODO comments for cronjob v1beta1 support
2022-09-12 05:07:25 -07:00
Kubernetes Prow Robot
0317be1b76 Merge pull request #935 from pravarag/update-docs-1.25-release
Update docs & manifests for v0.25.0
2022-09-08 06:49:45 -07:00
Kubernetes Prow Robot
d8bac08592 Merge pull request #945 from gallowaystorm/patch-1
feat: add RemovePodsHavingTooManyRestarts to values.yaml
2022-09-07 18:22:06 -07:00
Storm Galloway
d14df1fedf feat: add RemovePodsHavingTooManyRestarts to yaml
This does the following:
1. Enables RemovePodsHavingTooManyRestarts when using Helm by default (it is not currently)
2. Adds RemovePodsHavingTooManyRestarts to the values.yaml for clearer configs
2022-09-07 14:42:35 -05:00
Kubernetes Prow Robot
8a769603a6 Merge pull request #928 from a7i/podlifetime-states-version-clarification
clarify which version PodLifeTime introduced states parameter and deprecated podStatusPhases
2022-09-07 09:40:37 -07:00
Kubernetes Prow Robot
137a6b999f Merge pull request #943 from gallowaystorm/patch-1
Add RemovePodsViolatingTopologySpreadConstraint to values.yaml
2022-09-06 19:58:37 -07:00
Storm Galloway
334b4bb12c Add RemovePodsViolatingTopologySpreadConstraint to values.yaml
- add RemovePodsViolatingTopologySpreadConstraint yaml to values.yaml to make chart config clearer
2022-09-06 12:47:06 -05:00
Kubernetes Prow Robot
5a2a180f17 Merge pull request #938 from a7i/remove-kubectl-dep
remove dependency on kubectl
2022-09-06 09:33:52 -07:00
Amir Alavi
1265b4c325 remove dependency on kubectl
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2022-09-06 10:47:22 -04:00
Kubernetes Prow Robot
ea8e648cfb Merge pull request #933 from a7i/1.25-rc.0
Bump to k8s 1.25
2022-09-06 07:30:56 -07:00
Amir Alavi
e8fae9a3b7 remove pod security policy; additional policy/v1beta1 cleanup; use informers for descheduler unit tests
update go to 1.19 and helm kubernetes cluster to 1.25
bump -rc.0 to 1.25 GA
bump k8s utils library
bump golang-ci
use go 1.19 for helm github action
upgrade kubectl from 0.20 to 0.25

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2022-09-04 10:30:40 -04:00
JaneLiuL
c9b0fbe467 Bump to k8s 1.25-rc.0 2022-09-03 09:57:56 -04:00
Pravar Agrawal
66694bb767 update helm chart version to v1.25.0 2022-09-03 00:00:16 +05:30
Pravar Agrawal
e68ceb2273 Update docs & manifests for v0.25.0 2022-09-02 23:51:21 +05:30
Amir Alavi
dcb81f65a9 remove TODO comments for cronjob v1beta1 support 2022-08-30 15:53:21 -04:00
Amir Alavi
face080485 clarify which version PodLifeTime introduced states parameter and deprecated podStatusPhases
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2022-08-27 14:59:14 -04:00
Kubernetes Prow Robot
1eade5bf91 Merge pull request #922 from jklaw90/remove-plugin
removing dupe plugin interface check
2022-08-25 12:48:08 -07:00
Julian Lawrence
bfcd310a16 removing dupe plugin interface check 2022-08-24 16:12:24 -07:00
Kubernetes Prow Robot
70df89601a Merge pull request #910 from JaneLiuL/master
bring lownodeutilization and highnodeutilization to plugin
2022-08-17 05:41:13 -07:00
JaneLiuL
680e650706 bring lownodeutilization and highnodeutilization to plugin 2022-08-17 17:30:46 +08:00
Kubernetes Prow Robot
b743b2d5f7 Merge pull request #903 from knelasevero/migrate-podantiaffinity
Migrate RemovePodsViolatingInterPodAntiAffinity into a plugin
2022-08-16 07:59:08 -07:00
Kubernetes Prow Robot
cfc5d0c24a Merge pull request #916 from ingvagabund/skip-fitsRequest-for-current-node
NodeFit: do not check whether node fitsRequest when a pod is already assigned to the node
2022-08-16 07:31:08 -07:00
Kubernetes Prow Robot
ddd145c69a Merge pull request #898 from JaneLiuL/security-gh
add security scan into gh-actions
2022-08-16 05:47:08 -07:00
Jan Chaloupka
d99bdfffc8 NodeFit: do not check whether node fitsRequest when a pod is already assigned to the node 2022-08-16 13:38:11 +02:00
Lucas Severo Alves
a2dd86ac3b Migrate RemovePodsViolatingInterPodAntiAffinity into a plugin 2022-08-16 12:29:25 +02:00
JaneLiuL
50676622de add security scan into gh-actions 2022-08-16 09:36:46 +08:00
Kubernetes Prow Robot
fa3ddc6fee Merge pull request #908 from jklaw90/migrate-RemovePodsViolatingTopologySpreadConstraint
RemovePodsViolatingTopologySpreadConstraint Plugin
2022-08-15 14:25:55 -07:00
Julian Lawrence
674bf4655d migrate plugin - pods violating topologyspread
updated to remove older params
2022-08-15 08:23:04 -07:00
Kubernetes Prow Robot
6d4abe88ca Merge pull request #913 from a7i/migrate-PodLifeTime-to-plugin
Migrate PodLifeTime to plugin
2022-08-15 07:14:14 -07:00
Amir Alavi
d4ff3aef61 Migrate PodLifeTime to plugin 2022-08-15 08:54:42 -04:00
Kubernetes Prow Robot
83c4f5d526 Merge pull request #912 from ingvagabund/container-engine
CONTAINER_ENGINE to override the default docker engine
2022-08-11 18:46:43 -07:00
Jan Chaloupka
d1a9190c50 CONTAINER_ENGINE to override the default docker engine 2022-08-11 16:09:46 +02:00
Kubernetes Prow Robot
a1d4770634 Merge pull request #911 from knelasevero/local-ct-install
introduce ct for local helm install test
2022-08-11 06:10:58 -07:00
Lucas Severo Alves
ba85e794b2 introduce ct for local helm install test 2022-08-10 18:01:42 +02:00
Kubernetes Prow Robot
0a50d5a7da Merge pull request #892 from JaneLiuL/master
bring removeduplicates to plugin
2022-08-10 02:40:30 -07:00
Kubernetes Prow Robot
2de4e23425 Merge pull request #906 from a7i/node-affinity-use-existing-validation
NodeAffinity plugin to use the existing validation methods
2022-08-10 02:02:31 -07:00
JaneLiuL
3474725176 bring removeduplicates to plugin 2022-08-10 15:02:28 +08:00
Amir Alavi
27fa7a70a1 NodeAffinity plugin to use the existing validation methods 2022-08-09 13:38:33 -04:00
Kubernetes Prow Robot
ccfaeb2275 Merge pull request #902 from BinacsLee/binacs/migrate-removepodshavingtoomanyrestarts-to-plugin
Migrate RemovePodsHavingTooManyRestarts to plugin
2022-08-09 09:10:36 -07:00
BinacsLee
d798e7d204 Migrate RemovePodsHavingTooManyRestarts to plugin 2022-08-09 22:05:36 +08:00
Kubernetes Prow Robot
788e9f86bd Merge pull request #860 from knelasevero/migrate-node-afinity-to-plugin
Migrate RemovePodsViolatingNodeAffinity to plugin
2022-08-09 06:14:51 -07:00
Lucas Severo Alves
0c3bf7f957 Migrate RemovePodsViolatingNodeAffinity into a plugin 2022-08-09 14:05:51 +02:00
Kubernetes Prow Robot
349453264e Merge pull request #904 from knelasevero/add-helm-test-step-1
add helm ct install.
2022-08-09 03:54:51 -07:00
Lucas Severo Alves
e9c23fe42f add helm ct install. First step, see https://github.com/kubernetes-sigs/descheduler/pull/895#issuecomment-1203608848 2022-08-08 21:11:20 +02:00
Kubernetes Prow Robot
27ed7d15b9 Merge pull request #899 from a7i/separate-args-validations
separate args validation for better reuse
2022-08-08 08:46:19 -07:00
Amir Alavi
55d4ed479c separate args validation for better reuse 2022-08-05 10:46:04 -04:00
Kubernetes Prow Robot
d109ea64d0 Merge pull request #861 from a7i/migrate-RemoveFailedPods-to-plugin
Migrate RemoveFailedPods to plugin
2022-08-04 07:49:46 -07:00
Amir Alavi
330def2e56 Migrate RemoveFailedPods to plugin 2022-08-02 23:30:49 -04:00
Kubernetes Prow Robot
9880ed5372 Merge pull request #896 from ingvagabund/update-owners
Remove emeritus_approvers from reviewers
2022-08-02 07:31:45 -07:00
Jan Chaloupka
d4ecff5ba4 Remove emeritus_approvers from reviewers 2022-08-02 12:46:23 +02:00
Kubernetes Prow Robot
46e712163a Merge pull request #888 from knelasevero/fix-memory-leak-shutdown-broadcaster
fix: events memory leak. Using new events implementation and take recorder out of EvictPod
2022-08-01 05:14:28 -07:00
Kubernetes Prow Robot
0d1d485850 Merge pull request #894 from ingvagabund/remove-migrated-node-taint-strategy
Remove RemovePodsViolatingNodeTaints strategy already migrated into a plugin
2022-07-28 07:47:11 -07:00
Jan Chaloupka
1294106a22 Remove RemovePodsViolatingNodeTaints strategy already migrated into a plugin 2022-07-28 16:34:42 +02:00
Lucas Severo Alves
0aa233415e use new events implementation and take recorder out of EvictPod 2022-07-28 15:18:21 +02:00
Kubernetes Prow Robot
0d3ff8a84f Merge pull request #857 from ingvagabund/migrate-RemovePodsViolatingNodeTaints-to-plugin
Migrate RemovePodsViolatingNodeTaints to a plugin
2022-07-26 05:59:10 -07:00
Jan Chaloupka
704f6d4496 Migrate RemovePodsViolatingNodeTaints into a plugin 2022-07-21 20:52:24 +02:00
Kubernetes Prow Robot
c699dd1ccc Merge pull request #885 from damemi/evict-options
Add EvictOptions struct to EvictPod()
2022-07-20 10:08:35 -07:00
Mike Dame
d5e66ab62e Add EvictOptions struct to EvictPod() 2022-07-20 16:52:44 +00:00
Kubernetes Prow Robot
3a486f1a79 Merge pull request #882 from iijimakazuyuki/master
Add default lease resource name in Helm chart's ClusterRole
2022-07-11 07:15:49 -07:00
Kubernetes Prow Robot
6e69a10396 Merge pull request #846 from ingvagabund/evictor-interface
Prepare pod evictor for the descheduling framework plugin
2022-07-09 11:27:46 -07:00
Kubernetes Prow Robot
d78994fe6d Merge pull request #883 from a7i/approver-a7i
code approvers: add a7i
2022-07-08 08:23:46 -07:00
Amir Alavi
9ef87b9937 code approvers: add a7i 2022-07-08 09:09:42 -04:00
Kazuyuki Iijima
8b849106ed Add default lease resource name in ClusterRole
Signed-off-by: Kazuyuki Iijima <iijimakazuyuki@gmail.com>
2022-07-08 21:45:17 +09:00
Kubernetes Prow Robot
2ea0a2e1c0 Merge pull request #876 from iijimakazuyuki/master
Use lease resource name from Helm value
2022-07-08 04:43:47 -07:00
Kazuyuki Iijima
329c357834 Use lease resource name from Helm value
Signed-off-by: Kazuyuki Iijima <iijimakazuyuki@gmail.com>
2022-07-08 00:25:39 +09:00
Kubernetes Prow Robot
8072a8c82e Merge pull request #871 from knelasevero/fix-chart-path
fix: chart path can't be relative
2022-07-07 07:11:34 -07:00
Kubernetes Prow Robot
7a7393f5ff Merge pull request #872 from JaneLiuL/master
fix log-file and log-dir issue
2022-07-07 06:57:35 -07:00
Lucas Severo Alves
df65157a3b disable vcs maintainer check 2022-07-06 20:09:07 +02:00
JaneLiuL
754f8c9def fix log-file and log-dir issue 2022-07-06 15:50:43 +08:00
Lucas Severo Alves
d75e9e8c4e fix comment space lint issue 2022-07-04 18:26:28 +02:00
Lucas Severo Alves
2cd79c6816 chart path can't be relative 2022-07-04 18:19:09 +02:00
Kubernetes Prow Robot
aff9a0ba06 Merge pull request #836 from a7i/balancedomains-belowavg
TopologySpreadConstraint: only evaluate nodes below ideal avg when balancing domains
2022-07-03 18:57:22 -07:00
Kubernetes Prow Robot
e1a10c36de Merge pull request #854 from knelasevero/improve-helm-setup
Improving helm setup
2022-07-01 11:03:23 -07:00
Kubernetes Prow Robot
d8897635b0 Merge pull request #834 from a7i/podlifetime-container-state
PodLifeTime: support container states PodInitializing and ContainerCreating
2022-06-28 07:59:58 -07:00
Amir Alavi
abf5752260 PodLifeTime: add States field and deprecate PodStatusPhases 2022-06-25 15:13:18 -04:00
Amir Alavi
934fffb669 RemovePodsViolatingTopologySpreadConstraint: test case to cover tainted nodes and eviction loop 2022-06-20 20:53:30 -04:00
Amir Alavi
7a5e67d462 topologyspreadconstraint_test: ensure specific pods were evicted 2022-06-20 19:21:58 -04:00
Amir Alavi
469bde0a01 TopologySpreadConstraint: only evaluate nodes below ideal avg when balancing domains 2022-06-20 18:42:34 -04:00
Jan Chaloupka
c838614b6c EvictPod: stop returning an error
When an error is returned, a strategy either stops completely or starts
processing another node. Given that the error can be transient, or that
only one of the limits has been exceeded, it is fair to simply skip a
pod that failed eviction and proceed to the next one instead.

In order to optimize the processing and stop earlier, it is more
practical to implement a check which reports when a limit has been
exceeded.
2022-06-17 10:12:57 +02:00
Jan Chaloupka
cc49f9fcc2 Drop node parameter from EvictPod
The method uses the node object only to get the node name, and the
node name can be retrieved from the pod object instead.

Some strategies might try to evict a pod in Pending state which
does not have the .spec.nodeName field set; in that case, the
test for the node limit is skipped.
2022-06-17 10:10:25 +02:00
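The two EvictPod changes above amount to a small API reshaping. The sketch below is an illustrative reading of the described behavior, not the descheduler's actual code: the hypothetical podEvictor, EvictPod, and NodeLimitExceeded names show an evictor that takes the node name from pod.Spec.NodeName, skips the per-node limit for pending pods, and exposes a limit check instead of returning an error.

```go
// Hypothetical sketch of the EvictPod changes described above; the names
// podEvictor and NodeLimitExceeded are illustrative, not the project's API.
package evictor

import v1 "k8s.io/api/core/v1"

type podEvictor struct {
	maxPodsPerNode int
	evictedPerNode map[string]int
}

// EvictPod no longer takes a node and no longer returns an error: the node
// name comes from the pod itself, and a skipped eviction simply reports
// false so the strategy can move on to the next pod.
func (e *podEvictor) EvictPod(pod *v1.Pod) bool {
	nodeName := pod.Spec.NodeName
	if nodeName == "" {
		// Pending pod with no node assigned: skip the per-node limit test.
		return true
	}
	if e.maxPodsPerNode > 0 && e.evictedPerNode[nodeName] >= e.maxPodsPerNode {
		return false
	}
	e.evictedPerNode[nodeName]++
	return true
}

// NodeLimitExceeded lets a strategy stop processing a node early instead of
// relying on eviction errors.
func (e *podEvictor) NodeLimitExceeded(node *v1.Node) bool {
	return e.maxPodsPerNode > 0 && e.evictedPerNode[node.Name] >= e.maxPodsPerNode
}
```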
Amir Alavi
4e710cdf3b PodLifeTime: support container states PodInitializing and ContainerCreating 2022-06-16 21:17:49 -04:00
Jan Chaloupka
d5ee855221 Pass the strategy name into evictor through context 2022-06-16 16:32:13 +02:00
Kubernetes Prow Robot
b2418ef481 Merge pull request #847 from ingvagabund/split-pod-evictor-and-evictor-filter
Split pod evictor and evictor filter
2022-06-16 07:22:49 -07:00
Kubernetes Prow Robot
1f1aad335a Merge pull request #856 from a7i/pod-sort-age-random
remove random creation timestamp from pod sort by age test
2022-06-16 01:18:49 -07:00
Kubernetes Prow Robot
627d219dab Merge pull request #852 from knelasevero/existing-contraints
Check existing constraints before assigning
2022-06-15 06:07:17 -07:00
Lucas Severo Alves
30c972e49e change namespaceTopologySpreadConstraints from map to slice 2022-06-15 14:54:56 +02:00
Amir Alavi
a7cfb25e9b remove random creation timestamp from pod sort by age test 2022-06-14 23:28:58 -04:00
Lucas Severo Alves
45e1cdbd01 WIP: improving helm setup 2022-06-14 17:08:00 +02:00
Lucas Severo Alves
dad3db3187 Check existing constraints before assigning 2022-06-14 10:43:59 +02:00
Jan Chaloupka
d2130747d8 Split pod evictor and evictor filter 2022-06-13 18:48:05 +02:00
Kubernetes Prow Robot
84c8d1ca03 Merge pull request #843 from damemi/docs-readme-1.24.1
Update docs, manifests for 0.24.1 on master
2022-06-08 06:58:24 -07:00
Mike Dame
5dfd54e500 Docs and readme updates 2022-06-08 13:35:22 +00:00
Kubernetes Prow Robot
7550fba2fa Merge pull request #840 from a7i/nodefit-docs
nodeFit: fix docs spacing for placement of the field
2022-06-07 12:28:27 -07:00
Amir Alavi
038b6e1ca7 nodeFit: fix docs spacing for placement of the field 2022-06-07 11:59:30 -04:00
Kubernetes Prow Robot
98a946dea7 Merge pull request #833 from a7i/podlifetime-sort-age
PodLifeTime: sort pods by creation timestamp
2022-06-07 01:23:49 -07:00
Amir Alavi
871a10344e e2e: use kubernetes utils pointer library 2022-06-06 22:05:32 -04:00
Amir Alavi
311d75223f PodLifeTime: sort pods by creation timestamp 2022-06-06 21:49:24 -04:00
Kubernetes Prow Robot
33807ed8e4 Merge pull request #830 from a7i/revert-805-cleanup/lownodeutilization
Revert "cleanup lownodeutilization code"
2022-06-01 08:51:03 -07:00
Amir Alavi
3cc0a68f13 lownodeutilization: clarify comments and variable naming for underutilized vs. overutilized 2022-06-01 11:39:38 -04:00
Amir Alavi
8e1d35cb3c Revert "cleanup lownodeutilization code" 2022-06-01 11:28:09 -04:00
Kubernetes Prow Robot
59c4904ddc Merge pull request #805 from xiaoanyunfei/cleanup/lownodeutilization
cleanup lownodeutilization code
2022-06-01 07:45:02 -07:00
Kubernetes Prow Robot
c5604c760d Merge pull request #825 from damemi/cloudbuild-timeout
Increase cloudbuild timeout to 25 minutes
2022-06-01 06:47:02 -07:00
Mike Dame
f769296243 Increase cloudbuild timeout to 25 minutes 2022-05-31 18:51:53 +00:00
Kubernetes Prow Robot
8972bd9bf0 Merge pull request #823 from damemi/fix-version-cmd
Fix version command to parse helm chart tags
2022-05-31 11:14:53 -07:00
Kubernetes Prow Robot
873381197b Merge pull request #821 from damemi/test-version-updates
Update helm tests util versions and release guide
2022-05-31 10:44:52 -07:00
Mike Dame
af45591c25 Fix version command to parse helm chart tags 2022-05-31 17:39:08 +00:00
Mike Dame
17e986418f Update helm tests util versions and release guide 2022-05-31 17:05:14 +00:00
Kubernetes Prow Robot
5a9e65833f Merge pull request #818 from damemi/release-guide-updates
Update release guide docs
2022-05-26 08:21:24 -07:00
Mike Dame
725ca47bda Update release guide docs 2022-05-25 15:36:58 +00:00
Kubernetes Prow Robot
f39058af1c Merge pull request #813 from stephan2012/bugfix/leader-election-chart-812
Arguments must be strings, not bool or number
2022-05-25 07:12:44 -07:00
Kubernetes Prow Robot
332d61dba8 Merge pull request #814 from stephan2012/bugfix/missing-keys-803
Add podAnnotations and podLabels to values and docs
2022-05-24 09:38:06 -07:00
Stephan Austermühle
3cbae5e72b Fix type error for the leader election
Also, add the missing update verb in the ClusterRole and adds required
time units to leaseDuration, renewDeadline, retryPeriod in the Chart
example.
2022-05-24 18:11:18 +02:00
Stephan Austermühle
d8a609a6e7 Add more precise description 2022-05-24 18:07:58 +02:00
Stephan Austermühle
f0fa4c0cc0 Add podAnnotations and podLabels to values and docs 2022-05-24 10:02:16 +02:00
Kubernetes Prow Robot
e61823c299 Merge pull request #809 from damemi/CVE-2022-27191
bump: golang.org/x/crypto
2022-05-23 21:39:10 -07:00
Mike Dame
14b83e6cc5 bump: golang.org/x/crypto 2022-05-23 21:17:27 +00:00
sunxiaofei
5e3b825427 cleanup lownodeutilization code 2022-05-23 17:20:35 +08:00
Kubernetes Prow Robot
15794ba00d Merge pull request #801 from KohlsTechnology/bump-go-1.18
Bump To Go 1.18.2
2022-05-18 23:54:07 -07:00
Sean Malloy
e494a5817e Bump To Go 1.18.2
The main k/k repo was updated to Go 1.18.2 for the
k8s v1.24.0 release. See below PR for reference.

https://github.com/kubernetes/kubernetes/pull/110044
2022-05-18 09:36:32 -05:00
Kubernetes Prow Robot
eb0be65687 Merge pull request #796 from JaneLiuL/master
Update helm chart version to v0.24
2022-05-16 15:06:19 -07:00
JaneLiuL
64786460cd Update helm chart version to v0.24 2022-05-13 08:27:15 +08:00
Kubernetes Prow Robot
9c110c4004 Merge pull request #791 from JaneLiuL/master
Bump to k8s 1.24.0
2022-05-12 12:06:33 -07:00
Kubernetes Prow Robot
0eddf7f108 Merge pull request #792 from pravarag/update-docs-1.24
Update Docs and Manifests for v0.24.0
2022-05-12 11:31:15 -07:00
Kubernetes Prow Robot
3c8d6c4d53 Merge pull request #795 from damemi/update-e2e
Update e2e test versions
2022-05-12 09:29:14 -07:00
Mike Dame
6e84d0a6ba React to removal of offensive language
https://github.com/kubernetes/kubeadm/issues/2200 went into effect in 1.24, so
e2es broke without the update.
2022-05-12 15:35:07 +00:00
Mike Dame
fb1df468ad golint fix 2022-05-12 14:21:34 +00:00
Mike Dame
ac4d576df8 Update e2e test versions 2022-05-12 14:16:53 +00:00
Pravar Agrawal
314ad65b04 Update docs and manifests for v0.24.0 2022-05-04 22:08:49 +05:30
JaneLiuL
969a618933 Bump to k8s 1.24.0 2022-05-04 10:17:47 +08:00
Kubernetes Prow Robot
028f205e8c Merge pull request #790 from ingvagabund/636
Added request considerations to NodeFit Feature [#636 follow up]
2022-05-03 19:09:16 -07:00
Jan Chaloupka
3eca2782d4 Addressing review comments
Both LowNode and HighNode utilization strategies evict only as many pods
as there are free resources on other nodes. Thus, the resource fit test
is always true by definition.
2022-04-28 18:54:54 +02:00
RyanDevlin
16eb9063b6 NodeFit parameter now considers pod requests 2022-04-28 10:16:52 +02:00
Kubernetes Prow Robot
eac3b4b54a Merge pull request #788 from ryan4yin/master
fix: incorrect yaml indentation in readme
2022-04-26 06:46:53 -07:00
Ryan Yin
d08cea731a fix: incorrect indentation 2022-04-26 06:05:12 +08:00
Kubernetes Prow Robot
0fc5ba9316 Merge pull request #787 from JaneLiuL/master
bump to k8s 1.24-rc.0
2022-04-25 12:05:43 -07:00
JaneLiuL
ecbd10afe2 bump to k8s 1.24-rc.0 2022-04-21 09:11:04 +08:00
Kubernetes Prow Robot
e5ed0540f2 Merge pull request #779 from pravarag/user-docs-typo
Fix missing param in user-guide for PodLifeTime strategy
2022-04-11 01:44:06 -07:00
Pravar Agrawal
4e972a7602 fix missing param in user-guide 2022-04-07 10:02:26 +05:30
Kubernetes Prow Robot
ae20b5b034 Merge pull request #732 from eminaktas/feature/metric-scape
feat: Add metric scrape configs in Helm Chart
2022-03-30 07:06:27 -07:00
Kubernetes Prow Robot
406e3ed5b3 Merge pull request #771 from dineshbhor/fix-highnodeutilization-node-sorting
Sort nodes in ascending order for HighNodeUtilization
2022-03-29 02:58:47 -07:00
dineshbhor
7589aaf00b Sort nodes in ascending order for HighNodeUtilization 2022-03-29 17:54:18 +09:00
eminaktas
ca90b53913 feat: Add metric scrape configs in Helm Chart
Signed-off-by: eminaktas <emin.aktas@trendyol.com>
2022-03-28 23:41:56 +03:00
Kubernetes Prow Robot
238eebeaca Merge pull request #722 from Dentrax/feature/leaderelection
feat(leaderelection): impl leader election for HA Deployment
2022-03-28 09:39:23 -07:00
Kubernetes Prow Robot
cf59d08193 Merge pull request #751 from HelmutLety/redo_#473
feat: Add DeviationThreshold Parameter for LowNodeUtilization (previous attempt: #473)
2022-03-28 03:53:24 -07:00
HelmutLety
2ea65e69dc feat(LowNodeUtilization): useDeviationThresholds, redo of #473
[751]: normalize Percentage in nodeutilization and clean the tests
2022-03-28 12:35:01 +02:00
Kubernetes Prow Robot
7f6a2a69b0 Merge pull request #777 from JacobHenner/support-taint-exclusions
Add RemovePodsViolatingNodeTaints taint exclusion
2022-03-28 02:47:23 -07:00
Jacob Henner
ac3362149b Add RemovePodsViolatingNodeTaints taint exclusion
Add taint exclusion to RemovePodsViolatingNodeTaints. This permits node
taints to be ignored by allowing users to specify ignored taint keys or
ignored taint key=value pairs.
2022-03-27 13:48:40 -04:00
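A minimal sketch of the taint-exclusion behavior described in the commit above; the excludedTaints parameter and the key / key=value matching are assumptions drawn from the commit message rather than the strategy's exact implementation.

```go
// Minimal sketch of the taint-exclusion idea described above; names and
// matching rules are assumptions based on the commit description.
package nodetaint

import v1 "k8s.io/api/core/v1"

// excludeTaint reports whether a node taint should be ignored because its
// key, or its key=value pair, appears in the user-supplied exclusion list.
func excludeTaint(taint *v1.Taint, excludedTaints []string) bool {
	for _, excluded := range excludedTaints {
		if taint.Key == excluded {
			return true
		}
		if taint.Value != "" && excluded == taint.Key+"="+taint.Value {
			return true
		}
	}
	return false
}
```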
Furkan
0a52af9ab8 feat(leaderelection): impl leader election
Signed-off-by: Furkan <furkan.turkal@trendyol.com>
Signed-off-by: eminaktas <eminaktas34@gmail.com>
Co-authored-by: Emin <emin.aktas@trendyol.com>
Co-authored-by: Yasin <yasintaha.erol@trendyol.com>
2022-03-25 14:33:14 +03:00
Kubernetes Prow Robot
07bbdc61c4 Merge pull request #762 from ingvagabund/nodeutilization-refactor
Promote NodeUsage to NodeInfo, evaluate thresholds separately
2022-03-15 17:33:48 -07:00
Kubernetes Prow Robot
17595fdcfc Merge pull request #764 from ingvagabund/taints-prefer-no-scheduler
RemovePodsViolatingNodeTaints: optionally include PreferNoSchedule taint
2022-03-14 17:36:10 -07:00
Jan Chaloupka
285523f0d9 RemovePodsViolatingNodeTaints: optionally include PreferNoSchedule taint 2022-03-14 16:46:03 +01:00
Kubernetes Prow Robot
c55a897599 Merge pull request #759 from JaneLiuL/master
OWNERS: add janeliul as a reviewer
2022-03-11 10:29:07 -08:00
Jan Chaloupka
52ff50f2d1 Promote NodeUsage to NodeInfo, evaluate thresholds separately 2022-03-11 13:52:37 +01:00
Jan Chaloupka
8ebf3fb323 nodeutilization: move node resource threshold value computation under a separate function 2022-03-11 12:46:11 +01:00
Kubernetes Prow Robot
0e0ae8df90 Merge pull request #761 from ingvagabund/TestTooManyRestarts-II
[e2e] TestTooManyRestarts: check if container status is set before accessing
2022-03-11 02:29:06 -08:00
Jan Chaloupka
bd3daa82d3 [e2e] TestTooManyRestarts: check if container status is set before accessing 2022-03-11 10:35:49 +01:00
Kubernetes Prow Robot
60a15f0392 Merge pull request #760 from ingvagabund/TestTooManyRestarts
[e2e] TestTooManyRestarts: check err and len before accessing pod items
2022-03-11 01:09:07 -08:00
Jan Chaloupka
d98cb84568 [e2e] TestTooManyRestarts: check err and len before accessing pod items 2022-03-11 09:45:05 +01:00
Kubernetes Prow Robot
6ab01eca63 Merge pull request #758 from hiroyaonoe/add-doc-about-max-no-of-pods-to-evict-per-namespace-policy
Update docs for maxNoOfPodsToEvictPerNamespace
2022-03-10 11:25:21 -08:00
Kubernetes Prow Robot
584ac2d604 Merge pull request #757 from prune998/prune/taint-logs
add conflicting taint to the logs
2022-03-10 05:37:35 -08:00
prune
448dc4784c add conflicting taint to the logs
log when counts mismatch

simplified logic to log blocking taints
2022-03-10 08:05:42 -05:00
JaneLiuL
3ca77e7a3d OWNERS: add janeliul as a reviewer 2022-03-08 07:48:11 +08:00
Hiroya Onoe
01e7015b97 Update docs for maxNoOfPodsToEvictPerNamespace 2022-03-07 16:21:04 +09:00
Kubernetes Prow Robot
fd5a8c7d78 Merge pull request #739 from JaneLiuL/master
Share links to all descheduler enhancement proposals in the project repo
2022-03-02 09:55:14 -08:00
Kubernetes Prow Robot
43148ecd0c Merge pull request #740 from JaneLiuL/doc-npd
fix doc about NPD description
2022-03-01 09:59:55 -08:00
Kubernetes Prow Robot
16501978dc Merge pull request #748 from damemi/update-v0.23.1
Update manifests and doc for v0.23.1
2022-03-01 07:47:46 -08:00
Mike Dame
1b4e48b006 Update manifests and doc for v0.23.1 2022-02-28 19:06:50 +00:00
Kubernetes Prow Robot
da6a3e063f Merge pull request #744 from antonio-te/master
Update golang image
2022-02-28 10:41:46 -08:00
Antonio Gurgel
5784c0cc04 Update golang image
1.17.3 is affected by CVE-2021-44716.
2022-02-28 07:22:26 -08:00
JaneLiuL
254a3a9ec1 Share links to all descheduler enhancement proposals in the project repository 2022-02-26 12:27:35 +08:00
JaneLiuL
328c695141 fix doc about NPD description 2022-02-26 12:23:33 +08:00
Kubernetes Prow Robot
3ab0268c5a Merge pull request #733 from JaneLiuL/master
remove MostRequestedPriority from doc since already deprecated
2022-02-24 04:32:32 -08:00
Jane Liu L
cd8dbdd1e2 remove MostRequestedPriority from doc since already deprecated 2022-02-24 09:00:36 +08:00
Kubernetes Prow Robot
54c50c5390 Merge pull request #731 from jklaw90/fix-ctx-cron
Bugfix: Cronjob ctx cancel
2022-02-22 11:35:18 -08:00
Julian Lawrence
a2cbc25397 updated to handle cronjob flow 2022-02-22 08:52:06 -08:00
Kubernetes Prow Robot
bd81f6436e Merge pull request #708 from damemi/utilization-values-readme
Clarify resource calculations in NodeUtilization strategy Readmes
2022-02-22 04:47:46 -08:00
Kubernetes Prow Robot
30be19b04e Merge pull request #715 from eminaktas/values-fix
fix: Remove deprecated parameters from cmdOptions and add the parameters under policy
2022-02-18 05:08:23 -08:00
Kubernetes Prow Robot
3c251fb09d Merge pull request #726 from jklaw90/log-eviction-node
Eviction Logs
2022-02-15 04:08:03 -08:00
Julian Lawrence
224e2b078f updated logs to help with debugging 2022-02-14 18:27:53 -08:00
Kubernetes Prow Robot
dd80d60f4f Merge pull request #716 from eminaktas/imagepullsecret
fix: add imagePullSecrets for deployment resource
2022-02-14 05:27:29 -08:00
Kubernetes Prow Robot
e88837a349 Merge pull request #704 from ingvagabund/update-chart-readme
Update charts README to reflect the new parameters
2022-02-11 14:23:46 -08:00
Kubernetes Prow Robot
5901f8af1b Merge pull request #697 from a7i/code-reviewer
OWNERS: add a7i as a reviewer
2022-02-11 08:14:23 -08:00
Kubernetes Prow Robot
0d1704a192 Merge pull request #717 from JaneLiuL/release-1.23.1
[release-1.23.1] Update helm chart version to v0.23.1
2022-02-08 04:34:54 -08:00
JaneLiuL
c5878b18c6 Update helm chart version to v0.23.1 2022-02-08 20:21:57 +08:00
emin.aktas
ff1954b32e fix: add imagePullSecrets for deployment resource
Signed-off-by: emin.aktas <eminaktas34@gmail.com>
Co-authored-by: yasintahaerol <yasintahaerol@gmail.com>
Co-authored-by: Dentrax <furkan.turkal@trendyol.com>
2022-02-07 18:05:18 +03:00
emin.aktas
4c8040bbaf fix: Remove deprecated parameters from cmdOptions and add the parameters under policy 2022-02-07 15:14:55 +03:00
Kubernetes Prow Robot
deaa314492 Merge pull request #712 from JaneLiuL/helm
fix helmchart fail to watch namespace issue
2022-02-06 10:36:51 -08:00
Jane Liu L
9c653a2274 fix helmchart fail to watch namespace issue 2022-02-04 18:34:21 +08:00
Kubernetes Prow Robot
8d37557743 Merge pull request #709 from damemi/update-helm-23
Update helm chart version to v0.23
2022-02-03 12:10:58 -08:00
Mike Dame
5081ad84b5 Update helm chart version to v0.23 2022-02-03 14:57:18 -05:00
Mike Dame
c51c066cd1 Clarify resource calculations in NodeUtilization strategy Readmes
This adds text explaining the resource calculation in LowNodeUtilization and HighNodeUtilization
2022-01-30 12:59:47 -05:00
Kubernetes Prow Robot
afb1d75ce1 Merge pull request #660 from martin-magakian/features/add_affinity_option
Adding 'affinity' support to run 'descheduler' in CronJob or Deployment
2022-01-27 05:56:27 -08:00
Jan Chaloupka
90e6174fdd Update charts README to reflect the new parameters 2022-01-27 14:46:15 +01:00
Kubernetes Prow Robot
8e3ef9a6b3 Merge pull request #694 from sharkannon/master
Updates to include annotations to the service account
2022-01-27 05:42:26 -08:00
Kubernetes Prow Robot
778a18c550 Merge pull request #700 from jklaw90/root-ctx
Use the root context cancellation
2022-01-27 05:08:25 -08:00
Julian Lawrence
1a98a566b3 adding cancellation from SIGINT/SIGTERM 2022-01-25 00:10:09 -08:00
Kubernetes Prow Robot
a643c619c9 Merge pull request #699 from ingvagabund/evict-pods-report-metrics-indendent-of-the-dry-mode
Evictor: report successful eviction independently of the dry-mode
2022-01-20 14:16:29 -08:00
Jan Chaloupka
203388ff1a Evictor: report successful eviction independently of the dry-mode
Dry mode currently does not report metrics when the eviction succeeds
2022-01-20 21:23:19 +01:00
Kubernetes Prow Robot
2844f80a35 Merge pull request #677 from ingvagabund/accumulated-eviction
Use a fake client when evicting pods by individual strategies to accumulate the evictions
2022-01-20 08:15:52 -08:00
Jan Chaloupka
901a16ecbc Do not collect the metrics when the metrics server is not enabled 2022-01-20 17:04:15 +01:00
Jan Chaloupka
e0f086ff85 Use a fake client when evicting pods by individual strategies to accumulate the evictions
Currently, when the descheduler runs with --dry-run enabled, no strategy actually
evicts a pod, so every strategy always starts with a complete list of
pods. E.g. when the PodLifeTime strategy evicts a few pods, the RemoveDuplicatePods
strategy still takes into account even the pods eliminated by the PodLifeTime
strategy. This does not correspond to real-world scenarios, as the
same pod can be evicted multiple times. Instead, use a fake client and
evict/delete the pods from its cache so the strategies evict each pod
at most once, as would normally happen in a real cluster.
2022-01-20 17:04:05 +01:00
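A rough sketch of the dry-run approach described above, assuming client-go's fake clientset as the backing store; the actual wiring inside the descheduler may differ.

```go
// Rough sketch of the dry-run idea described above: strategies run against a
// fake clientset so they observe each other's evictions without touching the
// real cluster. Illustrative only.
package dryrun

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// newDryRunClient seeds a fake clientset with the pods currently in the
// cluster so strategies can list from it instead of the real API server.
func newDryRunClient(pods []*v1.Pod) kubernetes.Interface {
	objs := make([]runtime.Object, 0, len(pods))
	for _, p := range pods {
		objs = append(objs, p)
	}
	return fake.NewSimpleClientset(objs...)
}

// evictInDryRun "evicts" a pod by deleting it from the fake client's cache,
// so a later strategy no longer sees it and each pod is evicted at most once.
func evictInDryRun(ctx context.Context, client kubernetes.Interface, pod *v1.Pod) error {
	return client.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
}
```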
Amir Alavi
0251935268 OWNERS: add a7i as a reviewer 2022-01-18 09:14:44 -05:00
Stephen Herd
8752a28025 Merge branch 'kubernetes-sigs-master' 2022-01-13 12:52:36 -08:00
Stephen Herd
24884c7568 Rebase from master 2022-01-13 12:52:06 -08:00
Kubernetes Prow Robot
175f648045 Merge pull request #695 from a7i/liveness-template
make livenessprobe consistent across manifests
2022-01-12 13:37:40 -08:00
Amir Alavi
f50a3fa119 make livenessprobe consistent across manifests; make helm chart configurable via values.yaml 2022-01-12 11:49:17 -05:00
Kubernetes Prow Robot
551eced42a Merge pull request #688 from babygoat/evict-failed-without-ownerrefs
feat: support eviction of failed bare pods
2022-01-11 12:31:15 -08:00
Stephen Herd
3635a8171c Updates to include annotations to the service account, needed for things such as Workload Identity in Google Cloud 2022-01-11 11:55:05 -08:00
Kubernetes Prow Robot
796f347305 Merge pull request #692 from jklaw90/sliding-until
NonSlidingUntil for deployment
2022-01-11 06:21:16 -08:00
Kubernetes Prow Robot
13abbe7f09 Merge pull request #693 from developer-guy/patch-1
Update NOTES.txt
2022-01-10 05:11:13 -08:00
Kubernetes Prow Robot
e4df54d2d1 Merge pull request #685 from JaneLiuL/master
add liveness probe
2022-01-10 04:29:12 -08:00
Jane Liu L
c38f617e40 add liveness probe 2022-01-10 09:56:53 +08:00
Kubernetes Prow Robot
e6551564c4 Merge pull request #691 from RyanDevlin/waitForNodes
Eliminated race condition in E2E tests
2022-01-07 06:16:30 -08:00
Batuhan Apaydın
3a991dd50c Update NOTES.txt
Signed-off-by: Batuhan Apaydın <batuhan.apaydin@trendyol.com>
Co-authored-by: Furkan Türkal <furkan.turkal@trendyol.com>
Co-authored-by: Emin Aktaş <emin.aktas@trendyol.com>
Co-authored-by: Necatican Yıldırım <necatican.yildirim@trendyol.com>
Co-authored-by: Fatih Sarhan <fatih.sarhan@trendyol.com>
2022-01-07 13:42:00 +03:00
Julian Lawrence
77cb406052 updated until -> sliding until 2022-01-06 12:55:10 -08:00
RyanDevlin
921a5680ab Eliminated race condition in E2E tests 2022-01-06 09:36:13 -05:00
babygoat
1529180d70 feat: support eviction of failed bare pods
This patch adds the policy (evictFailedBarePods) to allow failed
pods without ownerReferences to be evicted. For backward compatibility,
the policy is disabled by default. Addresses #644.
2022-01-06 01:07:41 +08:00
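A sketch of the eviction filter implied by the commit above, under the assumption that the check boils down to "failed pod with no ownerReferences"; the function name and shape are illustrative only.

```go
// Sketch of the filter implied by the commit above: failed pods without
// ownerReferences ("bare" pods) become evictable only when the
// evictFailedBarePods policy is enabled. Illustrative only.
package evictions

import v1 "k8s.io/api/core/v1"

func failedBarePodEvictable(pod *v1.Pod, evictFailedBarePods bool) bool {
	isBare := len(pod.OwnerReferences) == 0
	isFailed := pod.Status.Phase == v1.PodFailed
	if isBare && isFailed {
		// Disabled by default for backward compatibility (see #644).
		return evictFailedBarePods
	}
	// Other pods fall through to the usual eviction checks.
	return true
}
```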
Kubernetes Prow Robot
2d9143d129 Merge pull request #687 from jklaw90/error-comment
Comment update for metrics
2022-01-04 06:52:52 -08:00
Kubernetes Prow Robot
e9c0833b6f Merge pull request #689 from ingvagabund/run-hack-update-generated-conversions-sh
run ./hack/update-* scripts
2022-01-04 06:34:52 -08:00
Jan Chaloupka
8462cf56d7 run ./hack/update-* scripts 2022-01-04 09:37:01 +01:00
Julian Lawrence
a60d6a527d updated comment to reflect actual value 2021-12-29 10:56:10 -08:00
Kubernetes Prow Robot
2b23694704 Merge pull request #682 from jklaw90/chart-labels
commonLabels value for chart
2021-12-26 06:15:15 -08:00
Julian Lawrence
d0a95bee2f fixed default value for common labels 2021-12-20 08:24:30 -08:00
Julian Lawrence
57a910f5d1 adding commonLabels value 2021-12-18 23:31:52 -08:00
Kubernetes Prow Robot
ccaedde183 Merge pull request #661 from kirecek/enhc/include-pod-reason
Add pod.Status.Reason to the list of reasons
2021-12-17 13:11:55 -08:00
Erik Jankovič
2020642b6f chore: add pod.Status.Reason to the list of reasons
Signed-off-by: Erik Jankovič <erik.jankovic@gmail.com>
2021-12-17 18:37:53 +01:00
Kubernetes Prow Robot
96ff5d2dd9 Merge pull request #680 from ingvagabund/klog-output-stdout
Set the klog output to stdout by default
2021-12-16 05:31:18 -08:00
Jan Chaloupka
d8718d7db3 Set the klog output to stdout by default
Also, one needs to set --logtostderr=false to properly log to stdout
2021-12-16 11:22:40 +01:00
Kubernetes Prow Robot
1e5165ba9f Merge pull request #670 from autumn0207/improve_pod_eviction_metrics
Add node name label to the counter metric for evicted pods
2021-12-16 01:49:18 -08:00
autumn0207
8e74f8bd77 improve pod eviction metrics 2021-12-16 17:06:22 +08:00
Kubernetes Prow Robot
2424928019 Merge pull request #667 from damemi/1.23-rc.0
bump: k8s to 1.23
2021-12-15 06:56:20 -08:00
Jan Chaloupka
e6314d2c7e Init the klog directly
Since 3948cb8d1b (diff-465167b08358906be13f9641d4798c6e8ad0790395e045af8ace4d08223fa922R78)
the klog verbosity level always gets overridden.
2021-12-15 09:23:20 -05:00
Kubernetes Prow Robot
271ee3c7e3 Merge pull request #678 from a7i/golangci-fix
fix: install golangci using from the golangci repo
2021-12-15 02:20:19 -08:00
Amir Alavi
e58686c142 fix: install golangci using from the golangci repo 2021-12-14 13:18:19 -05:00
Kubernetes Prow Robot
0b2c10d6ce Merge pull request #673 from Garrybest/pr_pod_cache
list pods assigned to a node by pod informer cache
2021-12-14 01:32:04 -08:00
Garrybest
cac3b9185b reform all test files
Signed-off-by: Garrybest <garrybest@foxmail.com>
2021-12-11 19:43:16 +08:00
Mike Dame
94888e653c Move klog initialization to cli.Run() 2021-12-10 12:00:11 -05:00
Mike Dame
936578b238 Update k8s version in helm test 2021-12-10 10:14:47 -05:00
Mike Dame
4fa7bf978c run hack/update-generated-deep-copies.sh 2021-12-10 10:02:39 -05:00
Mike Dame
2f7c496944 React to 1.23 bump
Logging validation functions changed in upstream commit
54ecfcdac8.
This uses the new function name.
2021-12-10 10:02:26 -05:00
Mike Dame
5fe3ca86ff bump: k8s to 1.23 2021-12-10 10:02:14 -05:00
Garrybest
0ff8ecb41e reform all strategies by using getPodsAssignedToNode
Signed-off-by: Garrybest <garrybest@foxmail.com>
2021-12-10 19:28:51 +08:00
Garrybest
08ed129a07 reform ListPodsOnANode by using pod informer and indexer
Signed-off-by: Garrybest <garrybest@foxmail.com>
2021-12-10 19:25:20 +08:00
Kubernetes Prow Robot
49ad197dfc Merge pull request #658 from JaneLiuL/master
Add maxNoOfPodsToEvictPerNamespace policy
2021-12-03 01:50:27 -08:00
Kubernetes Prow Robot
82201d0e48 Add maxNoOfPodsToEvictPerNamespace policy 2021-12-03 10:58:37 +08:00
Kubernetes Prow Robot
2b95332e8c Merge pull request #665 from spiffxp/use-k8s-infra-for-gcb-image
images: use k8s-staging-test-infra/gcb-docker-gcloud
2021-11-30 13:59:01 -08:00
Aaron Crickenberger
e8ed62e540 images: use k8s-staging-test-infra/gcb-docker-gcloud 2021-11-30 13:12:18 -08:00
Kubernetes Prow Robot
e5725de7bb Merge pull request #664 from stpabhi/dev
fix typo minPodLifeTimeSeconds
2021-11-30 08:10:56 -08:00
Abhilash Pallerlamudi
c47e811937 fix typo minPodLifeTimeSeconds
Signed-off-by: Abhilash Pallerlamudi <stp.abhi@gmail.com>
2021-11-29 17:51:40 -08:00
Kubernetes Prow Robot
e0bac4c371 Merge pull request #662 from ingvagabund/drop-deprecated-flags
Drop deprecated flags
2021-11-29 08:43:23 -08:00
Jan Chaloupka
73a7adf572 Drop deprecated flags 2021-11-29 17:12:59 +01:00
Kubernetes Prow Robot
5cf381a817 Merge pull request #663 from ingvagabund/bump-go-to-1.17
Bump go version in go.mod to go1.17
2021-11-29 08:01:23 -08:00
Jan Chaloupka
4603182320 Bump go version in go.mod to go1.17 2021-11-29 16:49:35 +01:00
Martin Magakian
ad207775ff Adding 'affinity' support to run 'descheduler' in CronJob or Deployment 2021-11-18 11:08:36 +01:00
Kubernetes Prow Robot
d0f11a41c0 Merge pull request #639 from JaneLiuL/master
Ignore Pods With Deletion Timestamp
2021-11-15 08:44:49 -08:00
Jane Liu L
c7524705b3 Ignore Pods With Deletion Timestamp 2021-11-10 09:32:11 +08:00
Kubernetes Prow Robot
50f9513cbb Merge pull request #642 from wking/clarify-RemovePodsHavingTooManyRestarts
README: Clarify podRestartThreshold applying to the sum over containers
2021-10-13 03:15:49 -07:00
W. Trevor King
6fd80ba29c README: Clarify podRestartThreshold applying to the sum over containers
calcContainerRestarts sums over containers.  The new language makes
that clear, avoiding potential confusion vs. an alternative that looked
for pods where a single container had passed the configured threshold.
For example, with three containers with 50 restarts each and a threshold of
100, the actual "sum over containers" logic makes that pod a candidate
for descheduling, but the "largest single container restart count"
hypothetical would not have made it a candidate.

Also shifts labelSelector into the parameter table, because when it
was added in 29ade13ce7 (README and e2e-testcase add for
labelSelector, 2021-03-02, #510), it landed a few lines too high.
2021-10-07 14:51:26 -07:00
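A small sketch of the "sum over containers" semantics the README clarification describes; the helper names and the includeInitContainers parameter are illustrative, not the strategy's exact code.

```go
// Sketch of the "sum over containers" semantics clarified above: a pod is a
// descheduling candidate when the total restarts across all of its containers
// reach the threshold, not when any single container does.
package toomanyrestarts

import v1 "k8s.io/api/core/v1"

// totalRestarts sums restart counts over a pod's containers (and, optionally,
// its init containers), matching what calcContainerRestarts describes.
func totalRestarts(pod *v1.Pod, includeInitContainers bool) int32 {
	var restarts int32
	for _, cs := range pod.Status.ContainerStatuses {
		restarts += cs.RestartCount
	}
	if includeInitContainers {
		for _, cs := range pod.Status.InitContainerStatuses {
			restarts += cs.RestartCount
		}
	}
	return restarts
}

// With three containers at 50 restarts each and podRestartThreshold=100,
// totalRestarts returns 150 >= 100, so the pod is a candidate for eviction.
func exceedsThreshold(pod *v1.Pod, threshold int32) bool {
	return totalRestarts(pod, false) >= threshold
}
```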
Kubernetes Prow Robot
5b557941fa Merge pull request #627 from JaneLiuL/master
Add E2E test case cover duplicatepods strategy
2021-10-01 00:01:22 -07:00
Kubernetes Prow Robot
c6229934a0 Merge pull request #637 from KohlsTechnology/helm-suspend-docs
Document suspend helm chart configuration option
2021-09-30 23:47:22 -07:00
Sean Malloy
ed28eaeccc Document suspend helm chart configuration option 2021-09-30 23:30:10 -05:00
Kubernetes Prow Robot
3be910c238 Merge pull request #621 from uthark/oatamanenko/deleted
Ignore pods being deleted
2021-09-30 21:21:22 -07:00
Kubernetes Prow Robot
d96dd6da2d Merge pull request #632 from a7i/amir/failedpods-crash
RemoveFailedPods: guard against nil descheduler strategy (e.g. in case of default that loads all strategies)
2021-09-29 01:02:49 -07:00
Amir Alavi
f7c26ef41f e2e tests for RemoveFailedPods strategy
Fix priority class default
2021-09-26 20:39:32 -04:00
Jane Liu L
57ad9cc91b Add E2E test case cover tooManyRestarts strategy 2021-09-26 09:10:17 +08:00
Kubernetes Prow Robot
926339594d Merge pull request #622 from yutachaos/feature/added_suspend_parameter
Added support for cronjob suspend
2021-09-22 09:18:01 -07:00
Amir Alavi
1ba53ad68c e2e TestTopologySpreadConstraint: ensure pods are running before checking for topology spread across domains 2021-09-20 18:18:47 -04:00
Amir Alavi
6eb37ce079 RemoveFailedPods: guard against nil descheduler strategy (e.g. in case of default that loads all strategies) 2021-09-20 11:20:54 -04:00
Kubernetes Prow Robot
54d660eee0 Merge pull request #629 from chenkaiyue/fix-node-affinity-test
fix duplicate code in node_affinity_test.go
2021-09-16 01:59:46 -07:00
yutachaos
cf219fbfae Added helm chart suspend parameter
Signed-off-by: yutachaos <18604471+yutachaos@users.noreply.github.com>
2021-09-16 14:32:09 +09:00
kaiyuechen
d1d9ea0c48 fix duplicate code in node_affinity_test.go 2021-09-16 10:39:52 +08:00
Oleg Atamanenko
4448d9c670 Ignore pods being deleted 2021-09-15 00:05:51 -07:00
Kubernetes Prow Robot
3909f3acae Merge pull request #623 from damemi/release-1.22
Update Helm chart version to 0.22.0
2021-09-08 13:13:56 -07:00
Mike Dame
9f1274f3ab Update Helm chart version to 0.22.0 2021-09-08 16:00:56 -04:00
Kubernetes Prow Robot
e6926e11ea Merge pull request #617 from KohlsTechnology/docs-1.22
Update Docs and Manifests for v0.22.0
2021-08-31 09:31:37 -07:00
Sean Malloy
16228c9dd1 Update Docs and Manifests for v0.22.0
* Added v0.22 references to README
* Update k8s manifests with v0.22.0 references
* Added table with list of supported architectures by release
2021-08-31 00:24:18 -05:00
Kubernetes Prow Robot
57dabbca5c Merge pull request #597 from a7i/amir/e2e-topology
Add e2e tests for TopologySpreadConstraint
2021-08-30 22:00:28 -07:00
Kubernetes Prow Robot
04439c6e64 Merge pull request #610 from a7i/failed-pods
Introduce RemoveFailedPods strategy
2021-08-30 11:30:09 -07:00
Amir Alavi
0e0e688fe8 Introduce RemoveFailedPods strategy 2021-08-30 14:17:52 -04:00
Kubernetes Prow Robot
0603de4353 Merge pull request #613 from derdanne/derdanne/highnodeutilization-readme-591
nodeutilization strategy: "numberOfNodes" should work as documented
2021-08-24 08:29:14 -07:00
Kubernetes Prow Robot
ea911db6dc Merge pull request #615 from KohlsTechnology/bump-1.22
Bump To k8s 1.22.0
2021-08-19 07:07:24 -07:00
Sean Malloy
c079c7aaae Bump To k8s 1.22.0 2021-08-19 00:40:38 -05:00
Kubernetes Prow Robot
5420988a28 Merge pull request #614 from KohlsTechnology/bump-go-version
Bump To Go 1.16.7
2021-08-18 02:10:08 -07:00
Sean Malloy
b56a1ab80a Bump To Go 1.16.7
The main k/k repo was updated to Go 1.16.7 for k8s
v1.22.0 release. See below PR for reference.

https://github.com/kubernetes/kubernetes/pull/104200
2021-08-17 23:47:45 -05:00
Daniel Klockenkämper
a6b34c1130 nodeutilization strategy: "numberOfNodes" should work as documented 2021-08-17 13:29:07 +00:00
Amir Alavi
0de8002b7d Update gce scripts to spread nodes over 2 zones 2021-08-16 22:41:37 -04:00
Amir Alavi
84d648ff60 Add e2e tests for TopologySpreadConstraint 2021-08-16 22:39:31 -04:00
Kubernetes Prow Robot
6ad6f6fce5 Merge pull request #592 from chrisjohnson00/issue-591
docs: adding clarification to HighNodeUtilization's purpose
2021-08-12 21:38:22 -07:00
Kubernetes Prow Robot
b7100ad871 Merge pull request #582 from praxist/helm-deployment
Update helm chart for running as deployment
2021-08-12 07:29:47 -07:00
Matthew Leung
38c0f1c639 Update helm chart for running as deployment 2021-08-11 11:56:14 -07:00
Kubernetes Prow Robot
64d7901d82 Merge pull request #602 from a7i/balance-domains-aboveavg
TopologySpreadConstraint: advance above avg index when at ideal average
2021-08-10 05:47:25 -07:00
Kubernetes Prow Robot
ab1015e5fa Merge pull request #599 from a7i/amir/update-gce-images
Update GCE images to Ubuntu 18.04 LTS
2021-08-10 05:47:18 -07:00
Kubernetes Prow Robot
1753bf4422 Merge pull request #611 from a7i/docs-remove-redundant-strategies
Update README to remove redundant list of strategies
2021-08-02 08:11:22 -07:00
Amir Alavi
7cb44dca27 Update README to remove redundant list of strategies 2021-07-30 16:35:00 -04:00
Amir Alavi
2ec4b8b674 Update GCE images to Ubuntu 18.04 LTS 2021-07-30 16:14:20 -04:00
Kubernetes Prow Robot
0d0633488d Merge pull request #598 from jayonlau/clenup
Clean up extra spaces
2021-07-30 11:11:37 -07:00
Kubernetes Prow Robot
f3b3853d9d Merge pull request #562 from wsscc2021/helm-chart-tolerations
Add tolerations to cronjob in helm chart
2021-07-30 10:43:38 -07:00
Kubernetes Prow Robot
e550e5e22a Merge pull request #600 from a7i/topology-example
Add example for RemovePodsViolatingTopologySpreadConstraint
2021-07-28 05:37:34 -07:00
Kubernetes Prow Robot
2bf37ff495 Merge pull request #608 from a7i/master
Remove kind binary from repo
2021-07-28 01:13:35 -07:00
Amir Alavi
fa84ec6774 Remove kind binary from repo and add to gitignore 2021-07-27 08:28:55 -04:00
Kubernetes Prow Robot
e18e0416b1 Merge pull request #607 from a7i/fix-helm-test
Place bash shebang at the top of the script + Ensure Helm installed for run-helm-tests
2021-07-27 00:40:45 -07:00
Amir Alavi
34282162f8 Wait for job to start/finish in helm-test script 2021-07-26 23:53:36 -04:00
Amir Alavi
7a043d31be Ensure helm is installed and move shebang to top of file 2021-07-26 16:16:20 -04:00
Amir Alavi
b0e5d64bd7 TopologySpreadConstraint: advance above avg index when at ideal average when balancing domains 2021-07-13 22:55:10 -04:00
Amir Alavi
1c9ac2daee Add example for RemovePodsViolatingTopologySpreadConstraint 2021-07-09 22:56:59 -04:00
jayonlau
c6b67e8a6f Clean up extra spaces
Clean up extra spaces. Although these errors are not important, they affect the code specification.
2021-07-09 15:40:35 +08:00
Kubernetes Prow Robot
2e4873d103 Merge pull request #594 from ikarldasan/master-to-main
Rename master to main
2021-07-06 05:50:18 -07:00
Karl Dasan
3e483c4d85 Rename master to main 2021-07-01 13:48:50 +05:30
Kubernetes Prow Robot
032ea70380 Merge pull request #549 from pravarag/verify-defaulters-gen
Add verify scripts for defaulters generator
2021-06-30 04:05:03 -07:00
Pravar Agrawal
df84dc4548 add verify script for defaulters gen 2021-06-30 09:41:43 +05:30
Chris Johnson
d4fa83f8bc chore: PR feedback 2021-06-25 12:30:28 -07:00
Chris Johnson
448dbceadd docs: adding clarification to HighNodeUtilization's purpose 2021-06-24 16:20:21 -07:00
Kubernetes Prow Robot
b83b064992 Merge pull request #589 from a7i/remove-kind-bin
Remove kind binary from repo
2021-06-16 00:19:59 -07:00
Amir Alavi
133a0049e3 Remove kind binary from repo 2021-06-15 18:29:06 -04:00
Kubernetes Prow Robot
50b1c1337d Merge pull request #561 from mekza/support_image_pull_secrets
Add support for private registry creds
2021-06-10 23:28:58 -07:00
Martin-Zack Mekkaoui
d5deed44ca Add support for private registry creds 2021-06-09 22:11:37 +02:00
Kubernetes Prow Robot
0f785b9530 Merge pull request #584 from damemi/update-go-badge
Update Go report card badge
2021-06-08 10:48:08 -07:00
Mike Dame
eb1f0ecc14 Update Go report card badge 2021-06-08 13:39:53 -04:00
Kubernetes Prow Robot
b59995eeb8 Merge pull request #583 from ingvagabund/highnodeutil-nodefit
HighNodeUtilization: add NodeFit feature
2021-06-08 08:15:13 -07:00
Jan Chaloupka
d998d82357 HighNodeUtilization: add NodeFit feature 2021-06-08 16:59:43 +02:00
Kubernetes Prow Robot
f51ea72ce0 Merge pull request #577 from a7i/amira/cronjob-ga
Use stable batch/v1 API Group for Kubernetes 1.21
2021-06-08 06:03:13 -07:00
Kubernetes Prow Robot
fe8d4c0d21 Merge pull request #572 from audip/feature/add-deployment-k8s-yaml-files
Add run descheduler as deployment
2021-06-08 05:47:14 -07:00
Kubernetes Prow Robot
3843a2d5d1 Merge pull request #550 from hanumanthan/highnodeutilisation
Highnodeutilization strategy
2021-06-08 01:59:12 -07:00
Kubernetes Prow Robot
839a237d6a Merge pull request #581 from jsravn/patch-1
Remove namespace from ClusterRoleBinding
2021-06-07 07:16:40 -07:00
Hanu
4cd1e66ef3 Adding highnodeutilization strategy 2021-06-06 18:01:42 +08:00
Hanu
2f18864fa5 Refactor - Modify the common functions to be used by high utilisation 2021-06-06 18:00:43 +08:00
Hanu
6e71068f4f Refactoring lownodeutilization - extracting common functions 2021-06-06 18:00:29 +08:00
James Ravn
e40620effa Remove namespace from ClusterRoleBinding
It's not namespace scoped. This breaks some tools like kpt.
2021-06-04 11:04:14 +01:00
Kubernetes Prow Robot
d7dc0abd7b Merge pull request #576 from a7i/amira/topology-spread-label-filter
Filter pods by labelSelector during eviction for TopologySpreadConstraint strategy
2021-06-01 23:38:37 -07:00
Amir Alavi
012ca2398f Filter pods by labelSelector during eviction for TopologySpreadConstraint strategy 2021-06-01 15:42:23 -04:00
Amir Alavi
f07089d7b3 Bump Helm Chart, kind, and Kubernetes version for helm-test 2021-06-01 12:44:05 -04:00
Amir Alavi
a54b59f208 Use stable batch/v1 API Group for Kubernetes 1.21 2021-06-01 12:44:05 -04:00
Kubernetes Prow Robot
bfd5feaf60 Merge pull request #559 from RyanDevlin/nodeFit
Working nodeFit feature
2021-05-31 09:06:26 -07:00
RyanDevlin
41d46d0d3b Working nodeFit feature 2021-05-24 09:03:38 -04:00
Aditya Purandare
646c13ae15 Fix grammar and indentation issue for deployment resource 2021-05-21 12:42:51 -07:00
Kubernetes Prow Robot
3b9d3d9719 Merge pull request #563 from ingvagabund/removeduplicates-take-taints-into-account-for-node-count
RemoveDuplicates: take node taints, node affinity and node selector into account when computing a number of feasible nodes for the average occurrence of pods per node
2021-05-21 05:38:46 -07:00
Aditya Purandare
449383caa3 Add run descheduler as deployment files and update README 2021-05-20 17:04:05 -07:00
Kubernetes Prow Robot
31fd097c0a Merge pull request #527 from pravarag/add-helm-test
Add helm test
2021-05-19 06:10:51 -07:00
Kubernetes Prow Robot
11143d5b2c Merge pull request #570 from KohlsTechnology/bump-kind-version
Bump kind version
2021-05-19 05:40:51 -07:00
Sean Malloy
8480e03e9c Fail unit and e2e tests on any errors 2021-05-19 00:35:16 -05:00
Sean Malloy
0397425010 Bump Kind To v0.11.0
This is required for running e2e tests for k8s v1.21.
2021-05-18 23:37:10 -05:00
Jan Chaloupka
5396282e3d RemoveDuplicates: take node taints, node affinity and node selector into account when computing a number of feasible nodes for the average occurrence of pods per node
Nodes with taints which are not tolerated by evicted pods will never run the
pods. The same holds for node affinity and node selector.
So increase the number of pods per feasible node to decrease the
number of evicted pods.
2021-05-18 16:13:24 +02:00
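A simplified sketch of the feasibility check described above, covering only the taint/toleration part (node affinity and node selector would be handled analogously); it relies on the upstream Toleration.ToleratesTaint helper and is not the strategy's exact code.

```go
// Simplified sketch of the taint side of the feasibility check described
// above; nodes failing this check are excluded from the count of feasible
// nodes used to compute the average occurrence of duplicate pods per node.
package duplicates

import v1 "k8s.io/api/core/v1"

// toleratesAllTaints reports whether the pod tolerates every taint on the node.
func toleratesAllTaints(pod *v1.Pod, node *v1.Node) bool {
	for i := range node.Spec.Taints {
		taint := &node.Spec.Taints[i]
		tolerated := false
		for j := range pod.Spec.Tolerations {
			if pod.Spec.Tolerations[j].ToleratesTaint(taint) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			return false
		}
	}
	return true
}
```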
Kubernetes Prow Robot
a9ff644de6 Merge pull request #568 from BinacsLee/binacs-pkg-descheduler-descheduler_test-fix-errorhandling
Add verify script for govet & fix pkg/descheduler/descheduler_test.go
2021-05-17 08:18:23 -07:00
BinacsLee
fe8e17f72c fix staticcheck failure for pkg/descheduler/descheduler_test.go 2021-05-17 23:07:12 +08:00
Kubernetes Prow Robot
a1709e9edd Merge pull request #567 from a7i/topology-taint-toleration
RemovePodsViolatingTopologySpreadConstraint : Take node's taints into consideration when balancing domains
2021-05-14 12:41:58 -07:00
Amir Alavi
24c0ca2ef9 Take node's taints into consideration when balancing domains 2021-05-14 15:23:58 -04:00
Kubernetes Prow Robot
9b26abd538 Merge pull request #565 from damemi/issue-564
Add test cases for soft constraints/multi constraints
2021-05-12 19:14:24 -07:00
Mike Dame
fc83c13166 Add test cases for soft constraints/multi constraints 2021-05-12 08:36:07 -04:00
Kubernetes Prow Robot
9b69962053 Merge pull request #535 from ingvagabund/e2e-refactor
E2e refactor
2021-05-11 06:33:36 -07:00
Jan Chaloupka
4edbecc85d Define NodeSelectorsEqual predicate 2021-05-09 18:08:32 +02:00
Jan Chaloupka
54f67266bb Define TolerationsEqual 2021-05-09 18:08:27 +02:00
Donggeun Lee
4ba48b018c Add tolerations to cronjob in helm chart 2021-05-04 19:56:08 +09:00
Kubernetes Prow Robot
2a3529c543 Merge pull request #560 from damemi/update-verify-messages
Update error messages in verify scripts to be more informative
2021-04-30 20:05:59 -07:00
Mike Dame
58408d710b Update error messages in verify scripts to be more informative 2021-04-30 16:56:16 -04:00
Kubernetes Prow Robot
161f66a12f Merge pull request #558 from KohlsTechnology/structured-logging
Use Structured Logging For Unknown Strategy Log Message
2021-04-28 23:28:50 -07:00
Sean Malloy
6bde95c9a1 Use Structured Logging For Unknown Strategy Log Message
Always use structured logging. Therefore update klog.Errorf() to instead
use klog.ErrorS().

Here is an example of the new log message.

E0428 23:58:57.048912 586 descheduler.go:145] "skipping strategy" err="unknown strategy name" strategy=ASDFPodLifeTime
2021-04-29 00:00:07 -05:00
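A minimal illustration of the structured-logging change described above, assuming the message and key names from the example log line; it shows klog.ErrorS replacing klog.Errorf.

```go
// Minimal illustration of the structured-logging change described above.
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	name := "ASDFPodLifeTime"
	// Old style (unstructured):
	//   klog.Errorf("skipping strategy %q: unknown strategy name", name)
	// New style (structured), which yields output like the example log line above:
	klog.ErrorS(errors.New("unknown strategy name"), "skipping strategy", "strategy", name)
}
```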
Kubernetes Prow Robot
724ff8a188 Merge pull request #556 from damemi/change-main-loop
Invert main strategy loop for performance and customizability
2021-04-28 07:52:50 -07:00
Mike Dame
feae158a50 Invert main strategy loop for performance and customizability 2021-04-28 10:36:02 -04:00
Kubernetes Prow Robot
780ac7a51e Merge pull request #554 from BinacsLee/binacs-utils-predicates-cleanup
code cleanup: remove check on length
2021-04-26 07:19:01 -07:00
BinacsLee
c4afb6bb30 code cleanup: remove check on length 2021-04-25 21:44:20 +08:00
Pravar Agrawal
8b5c4e805d update docs with helm test info 2021-04-15 15:56:42 +05:30
Jan Chaloupka
f4e24a408f Drop klog 2021-04-14 09:03:38 +02:00
Jan Chaloupka
2781106d49 TestEvictAnnotation: replace LowNodeUtilization strategy with PodLifetime
PodLifetime is simpler in validating results
2021-04-14 09:03:31 +02:00
Jan Chaloupka
534a30a058 e2e: deleteRC: replace loop with wait.PollImmediate 2021-04-14 09:03:26 +02:00
Jan Chaloupka
bb55741320 Update vendor 2021-04-14 09:03:21 +02:00
Jan Chaloupka
079bd6157b e2e: TestLowNodeUtilization: normalize nodes before running the strategy 2021-04-14 09:03:15 +02:00
Pravar Agrawal
92cb1b23ed add helm-test configurations 2021-04-14 10:29:31 +05:30
Kubernetes Prow Robot
832facc526 Merge pull request #537 from KohlsTechnology/docs-0.21.0
Update Docs and Manifests for v0.21.0
2021-04-13 08:24:52 -07:00
Kubernetes Prow Robot
c4fa6c472f Merge pull request #548 from lx1036/feature/fix-testcase
Updating policy api version used in pod evictor
2021-04-13 08:12:50 -07:00
Xiang Liu
a848dac3cf Updating policy api version used in pod evictor 2021-04-13 11:02:01 +08:00
Kubernetes Prow Robot
43a2ccf9c4 Merge pull request #546 from ingvagabund/add-diagram
Add diagram of strategies
2021-04-12 13:38:15 -07:00
Jan Chaloupka
60cf3aeb95 Add diagram of strategies 2021-04-12 09:51:54 +02:00
Kubernetes Prow Robot
84b174e841 Merge pull request #547 from KohlsTechnology/bump-1.21.0
Bump To k8s 1.21.0
2021-04-08 23:00:48 -07:00
Sean Malloy
40337d064d Bump To k8s 1.21.0 2021-04-08 23:36:06 -05:00
Kubernetes Prow Robot
9fe585c854 Merge pull request #545 from pravarag/add-verify-script-deep-copies
Add verify script for deep-copies generator
2021-04-08 21:20:35 -07:00
Pravar Agrawal
4fce2ca2f1 add verify script for deep-copies gen 2021-04-08 22:51:24 +05:30
Kubernetes Prow Robot
4c11de0403 Merge pull request #507 from pravarag/add-verify-scripts
Add verify scripts for make gen to run during PR
2021-04-07 21:41:41 -07:00
Pravar Agrawal
a9099efc45 add verify scripts for conversions gen
Signed-off-by: Pravar Agrawal <pravaag1@in.ibm.com>
2021-04-08 09:56:32 +05:30
Kubernetes Prow Robot
6edb644f2e Merge pull request #544 from ingvagabund/lnu-improve-node-usage-logging
LNU: improve nodeUsage logging
2021-04-06 23:45:53 -07:00
Jan Chaloupka
c239e1199f LNU: improve nodeUsage logging
To avoid:
```
I0210 11:56:04.137956 3309277 lownodeutilization.go:389] "Updated node usage" updatedUsage={node:0xc000460000 usage:map[cpu:0xc00042b480 memory:0xc00042b4c0 pods:0xc00042b500] allPods:[0xc0004a0000 0xc0004a03e8 0xc0004a07d0 0xc0004a0bb8 0xc0004a0fa0 0xc0004a1388 0xc0004a1770 0xc0004a1b58] lowResourceThreshold:map[cpu:0xc00042b540 memory:0xc00042b580 pods:0xc00042b5c0] highResourceThreshold:map[cpu:0xc00042b600 memory:0xc00042b640 pods:0xc00042b680]}
I0210 11:56:04.138829 3309277 lownodeutilization.go:389] "Updated node usage" updatedUsage={node:0xc000460000 usage:map[cpu:0xc00042b480 memory:0xc00042b4c0 pods:0xc00042b500] allPods:[0xc0004a0000 0xc0004a03e8 0xc0004a07d0 0xc0004a0bb8 0xc0004a0fa0 0xc0004a1388 0xc0004a1770 0xc0004a1b58] lowResourceThreshold:map[cpu:0xc00042b540 memory:0xc00042b580 pods:0xc00042b5c0] highResourceThreshold:map[cpu:0xc00042b600 memory:0xc00042b640 pods:0xc00042b680]}
I0210 11:56:04.139044 3309277 lownodeutilization.go:389] "Updated node usage" updatedUsage={node:0xc000460000 usage:map[cpu:0xc00042b480 memory:0xc00042b4c0 pods:0xc00042b500] allPods:[0xc0004a0000 0xc0004a03e8 0xc0004a07d0 0xc0004a0bb8 0xc0004a0fa0 0xc0004a1388 0xc0004a1770 0xc0004a1b58] lowResourceThreshold:map[cpu:0xc00042b540 memory:0xc00042b580 pods:0xc00042b5c0] highResourceThreshold:map[cpu:0xc00042b600 memory:0xc00042b640 pods:0xc00042b680]}
```
2021-04-06 09:43:23 +02:00
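The unreadable output above comes from logging a struct whose fields are maps of quantity pointers, so klog prints addresses rather than values. Below is a minimal illustrative sketch of one way to make such usage readable (a hypothetical helper, not the actual change in this commit): render the quantities as strings before logging.

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/klog/v2"
)

// usageToStrings renders a map of resource quantities as human-readable
// strings so structured logging shows values instead of pointer addresses.
func usageToStrings(usage map[v1.ResourceName]*resource.Quantity) map[v1.ResourceName]string {
	out := map[v1.ResourceName]string{}
	for name, q := range usage {
		out[name] = q.String() // e.g. "500m", "1Gi"
	}
	return out
}

func main() {
	usage := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU:    resource.NewMilliQuantity(500, resource.DecimalSI),
		v1.ResourceMemory: resource.NewQuantity(1<<30, resource.BinarySI),
	}
	klog.InfoS("Updated node usage", "usage", usageToStrings(usage))
}
```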
Kubernetes Prow Robot
b713b7852a Merge pull request #434 from ZongqiangZhang/extend-resources
Support extended resources in LowNodeUtilization
2021-04-06 00:21:34 -07:00
Kubernetes Prow Robot
5d07d0c8e2 Merge pull request #543 from gaurav1086/e2e_test_fix_goroutine_leak
e2e_test: fix goroutine leak
2021-04-05 18:11:19 -07:00
Gaurav Singh
7076ba0760 e2e_test: fix goroutine leak 2021-04-04 21:19:56 -04:00
ZongqiangZhang
81b816d4a4 support extended resources in lownodeutilization 2021-04-02 21:37:51 +08:00
Kubernetes Prow Robot
9ebc909c7f Merge pull request #541 from KohlsTechnology/make-gen
Update Generated Code
2021-04-01 00:39:22 -07:00
Sean Malloy
af01b675b0 Update Generated Code
Ran "make gen" using Go 1.16.1. Some changes were merged, but "make gen"
was not run. This fixes the problem.

See below PR for reference:
https://github.com/kubernetes-sigs/descheduler/pull/523
2021-04-01 00:28:20 -05:00
Kubernetes Prow Robot
ce6ce5a058 Merge pull request #539 from damemi/1.21-rc.0
Bump to k8s 1.21-rc.0
2021-03-31 16:55:21 -07:00
Mike Dame
bd4f6d4fcd Bump to k8s 1.21-rc.0 2021-03-31 10:22:56 -04:00
Sean Malloy
6a4181158a Update Docs and Manifests for v0.21.0
* Added v0.21 references to README
* Update k8s manifests with v0.21.0 references
* Added table with list of supported architectures by release
2021-03-31 00:53:19 -05:00
Kubernetes Prow Robot
a2746d09e8 Merge pull request #523 from RyanDevlin/evict-critical
Added EvictSystemCriticalPods flag to descheduler
2021-03-30 10:41:57 -07:00
RyanDevlin
b5d7219391 Completed evictSystemCriticalPods feature 2021-03-29 23:13:05 -04:00
Kubernetes Prow Robot
b09d5d99dc Merge pull request #534 from KohlsTechnology/kustomize-docs
Use Tags In Kustomize Documentation
2021-03-27 23:46:44 -07:00
Sean Malloy
dbcc20f37f Use Tags In Kustomize Documentation
The master branch always represents the next release of the
descheduler. Therefore applying the descheduler k8s manifests
from the master branch is not considered stable. It is best for
users to install descheduler using the released tags.
2021-03-27 01:21:08 -05:00
Kubernetes Prow Robot
51340b56b8 Merge pull request #533 from ingvagabund/bump-to-go1.16
Bump go to 1.16
2021-03-26 23:20:43 -07:00
Jan Chaloupka
160669817e Bump go to 1.16 2021-03-25 10:13:40 +01:00
Kubernetes Prow Robot
6ca4479892 Merge pull request #520 from KohlsTechnology/statefulset-docs
Document That Descheduler Considers StatefulSets For Eviction
2021-03-23 08:15:36 -07:00
Sean Malloy
92740a25d4 Add Initial Unit Tests For StatefulSets 2021-03-23 09:24:08 -05:00
Sean Malloy
56e4daccaf Document That Descheduler Considers StatefulSets For Eviction
Similar to ReplicaSet, ReplicationController, and Jobs pods with a
StatefulSet metadata.ownerReference are considered for eviction.
Document this, so that it is clear to end users.
2021-03-23 09:24:08 -05:00
Kubernetes Prow Robot
b546832b66 Merge pull request #526 from KohlsTechnology/chart-docs-fix
Correct Helm Chart Docs For Container Requests and Limits
2021-03-15 08:59:05 -07:00
Kubernetes Prow Robot
39e5b34af3 Merge pull request #525 from damemi/topology-spread-selector-fix
(TopologySpread) Evict pods with selectors that match multiple nodes
2021-03-14 01:35:04 -08:00
Sean Malloy
e699e08d13 Correct Helm Chart Docs For Container Requests and Limits
The resources.cpuRequest and resources.memoryRequest variables are
not valid in the helm chart values.yaml file. The correct variable
name for setting the requests and limits is resources.

Also, fixed white space alignment in the markdown table.
2021-03-12 23:17:02 -06:00
Mike Dame
af26b57e5e (TopologySpread) Evict pods with selectors that match multiple nodes 2021-03-12 13:41:17 -05:00
Kubernetes Prow Robot
22fe589ae6 Merge pull request #508 from KohlsTechnology/emeritus-approvers
Move Inactive Maintainers to Emeritus Status
2021-03-12 09:06:18 -08:00
Kubernetes Prow Robot
0a11b5a138 Merge pull request #521 from KohlsTechnology/armv7
Enable ARM32v7 Container Image Builds
2021-03-09 17:07:13 -08:00
Sean Malloy
363f02710b Enable ARM32v7 Container Image Builds
Add ARM32v7 in addition to the currently supported architectures. This
will allow running descheduler on Raspberry Pi v3 devices.
2021-03-09 09:03:58 -06:00
Kubernetes Prow Robot
6abfa232e7 Merge pull request #518 from damemi/sigs-k8s-mdtoc
Add table of contents generator/verify script
2021-03-08 12:06:59 -08:00
Mike Dame
bbfb12a120 Run hack/update-toc.sh 2021-03-08 14:39:08 -05:00
Mike Dame
5df2a0c516 Add hack scripts and makefile targets 2021-03-08 14:39:08 -05:00
Mike Dame
8ecd14289a Add <toc> markers to README 2021-03-08 10:51:55 -05:00
Mike Dame
131ed42a4c Add sigs.k8s.io/mdtoc dependency 2021-03-08 10:46:27 -05:00
Kubernetes Prow Robot
6b8d4cd5a7 Merge pull request #517 from lixiang233/fix_topology_log
Correct log in topology spread strategy
2021-03-07 11:31:42 -08:00
Kubernetes Prow Robot
24a06511a2 Merge pull request #505 from ingvagabund/collect-metrics
Collect metrics
2021-03-06 21:41:42 -08:00
lixiang
09c7d1be0a Correct log in topology spread strategy 2021-03-06 16:21:46 +08:00
Kubernetes Prow Robot
f5666882de Merge pull request #515 from damemi/clarify-pod-percentage-readme
Clarify resource percentage calculation in README
2021-03-05 07:36:22 -08:00
Jan Chaloupka
701f22404b Serve secure metrics at 10258 2021-03-05 16:35:45 +01:00
Mike Dame
d5fa60bdd5 Clarify resource percentage calculation in README 2021-03-05 09:34:04 -05:00
Kubernetes Prow Robot
9e14f733b7 Merge pull request #510 from lixiang233/Ft_filter_by_label
Filter pods by labelSelector
2021-03-05 01:14:22 -08:00
lixiang
29ade13ce7 README and e2e-testcase add for labelSelector 2021-03-04 21:32:14 +08:00
lixiang
03518badb8 Strategies: Add labelSelector to all strategies except LowNodeUtilization, RemoveDuplicates and RemovePodsViolatingTopologySpreadConstraint. 2021-03-04 21:30:50 +08:00
Jan Chaloupka
24458fb0ca Increase pods_evicted metric 2021-03-03 16:15:21 +01:00
Jan Chaloupka
1c5b32763b Register metrics
New metrics:
- build_info: Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch
- pods_evicted: Number of successfully evicted pods, by the result, by the strategy, by the namespace
2021-03-03 16:15:09 +01:00
Jan Chaloupka
3bd031bbb3 Move build's versioning bits under pkg/version 2021-03-03 15:56:53 +01:00
Jan Chaloupka
ea6e9f22b9 Vendor metrics 2021-03-03 15:56:48 +01:00
Kubernetes Prow Robot
73309a3948 Merge pull request #490 from damemi/logging-format-default
Add default logging-format value
2021-03-03 06:49:20 -08:00
Kubernetes Prow Robot
105313a0e3 Merge pull request #512 from ingvagabund/bump-gogo-protobuf-to-1.3.2
Bump github.com/gogo/protobuf to v1.3.2
2021-03-02 07:05:19 -08:00
Jan Chaloupka
d368cbed32 Bump github.com/gogo/protobuf to v1.3.2
Fixes https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3121
2021-03-02 14:48:32 +01:00
Sean Malloy
5f4dfbc922 Update Maintainer Field in Dockerfiles
Set the maintainer field to use the more generic SIG scheduling name and
email address.
2021-03-01 23:19:42 -06:00
Sean Malloy
967911e4c1 Move Inactive Maintainers to Emeritus Status
For roughly the past year damemi has been the only active approver for
the descheduler. Therefore move the inactive approvers to emeritus
status. This will help clarify to contributors who should be assigned to
pull requests.
2021-03-01 09:23:49 -06:00
Kubernetes Prow Robot
726091712d Merge pull request #509 from KohlsTechnology/update-reviwers
Add lixiang233 As A Reviewer
2021-03-01 06:45:25 -08:00
lixiang
854afa7c73 PodsListing: Add WithLabelSelector option. 2021-03-01 11:13:44 +08:00
lixiang
2517268b1f API: Add a new parameter labelSelector to StrategyParameters. 2021-02-26 15:49:00 +08:00
Sean Malloy
fea8beabab Add lixiang233 As A Reviewer 2021-02-25 22:03:50 -06:00
Kubernetes Prow Robot
7f9c95fa16 Merge pull request #506 from KohlsTechnology/kind-0.10.0
Update e2e tests to use kind v0.10.0
2021-02-25 07:29:27 -08:00
Sean Malloy
6f7e2a271e Update e2e tests to use kind v0.10.0 2021-02-25 00:17:06 -06:00
Kubernetes Prow Robot
217ebdfa73 Merge pull request #504 from lixiang233/Fix_lowNodeUtilization_log
Log and README optimization for LowNodeUtilization
2021-02-24 05:22:50 -08:00
lixiang
e014fda58e Log and README optimization for LowNodeUtilization 2021-02-24 09:17:07 +08:00
Kubernetes Prow Robot
6d0360fd16 Merge pull request #502 from damemi/node-affinity-totalevicted
Move 'total pods evicted' log message to main loop
2021-02-19 13:45:41 -08:00
Mike Dame
01a87b6143 Move 'total pods evicted' log message to main loop 2021-02-19 10:37:43 -05:00
Kubernetes Prow Robot
f3e871492c Merge pull request #499 from KohlsTechnology/go-1.15.8
Bump To Go 1.15.8
2021-02-18 05:52:57 -08:00
Kubernetes Prow Robot
bf8d744686 Merge pull request #498 from kristinnardal2/patch-1
Fix indentation in values.yaml
2021-02-18 05:52:51 -08:00
Kubernetes Prow Robot
477d12968e Merge pull request #496 from KohlsTechnology/nonroot-helm-chart
Update Helm Chart to run as non-root
2021-02-18 00:34:51 -08:00
Sean Malloy
dcb4136a96 Bump To Go 1.15.8
The main k/k repo was updated to Go 1.15.8 for the upcoming k8s v1.21.0
release. See below PR for reference.

https://github.com/kubernetes/release/issues/1895
2021-02-18 00:00:31 -06:00
Kristinn Björgvin Árdal
2c0afafccf Fix indentation in values.yaml
A couple of lines had extra whitespace.
2021-02-17 08:51:26 +01:00
Kubernetes Prow Robot
e56617253f Merge pull request #497 from ingvagabund/resourceUsagePercentages-unit-test
LowNodeUtilization: unit test resourceUsagePercentages to validate percentages are computed correctly
2021-02-11 20:26:48 -08:00
Jan Chaloupka
500aaea4dd LowNodeUtilization: unit test resourceUsagePercentages to validate percentages are computed correctly 2021-02-11 16:04:37 +01:00
Sean Malloy
4444811f26 Update Helm Chart to run as non-root 2021-02-10 21:49:01 -06:00
Kubernetes Prow Robot
b63c6fac27 Merge pull request #495 from KohlsTechnology/nonroot
Update Job and CronJob YAML to run as non-root
2021-02-10 08:22:59 -08:00
Sean Malloy
dfc76906d4 Update Job and CronJob YAML to run as non-root 2021-02-09 23:10:18 -06:00
Kubernetes Prow Robot
fbd17d4caf Merge pull request #480 from damemi/nodeselector-cronjob
Add nodeSelector to cronJob helm chart
2021-02-09 10:32:59 -08:00
Mike Dame
9c01589fb9 Add nodeSelector to cronJob helm chart 2021-02-09 13:27:26 -05:00
Kubernetes Prow Robot
a0942afaa1 Merge pull request #493 from fancc/resource_usage_percentage
Modify resourceUsage.Value() to resourceUsage.MilliValue()
2021-02-02 03:38:28 -08:00
范成城
16fa21a4a6 change resourceUsagePercentage func 2021-01-31 20:46:46 +08:00
Kubernetes Prow Robot
241f1325c9 Merge pull request #481 from damemi/ignore-pvc-pods
Add option to ignore pods with PVCs from eviction
2021-01-26 22:41:40 -08:00
Mike Dame
c1a63a557a Add option to ignore pods with PVCs from eviction 2021-01-26 08:47:54 -05:00
Mike Dame
e45e21368a Add default logging-format value 2021-01-25 14:19:58 -05:00
Kubernetes Prow Robot
f24b367479 Merge pull request #474 from lixiang233/Ft_include_soft_constraints
Add a parameter to include soft topology spread constraints
2021-01-21 09:21:06 -08:00
lixiang
8ba9cb1df7 Add a parameter to include soft topology spread constraints 2021-01-21 11:33:16 +08:00
Kubernetes Prow Robot
d502f05910 Merge pull request #484 from damemi/topology-spread-bug
Fix TopologySpread bug that evicts non-evictable pods
2021-01-12 17:26:36 -08:00
Mike Dame
241f47d947 Fix TopologySpread bug that evicts non-evictable pods 2021-01-12 15:25:55 -05:00
Kubernetes Prow Robot
19424f4119 Merge pull request #482 from damemi/fix-logging-gen
Add Logging field to v1alpha1 componentconfig
2021-01-11 09:44:25 -08:00
Mike Dame
635a40f305 Add Logging field to v1alpha1 componentconfig 2021-01-11 11:21:32 -05:00
Kubernetes Prow Robot
1804d2e3a2 Merge pull request #478 from ryuichi1208/master
Update README.md
2021-01-10 11:43:05 -08:00
Ryuichi Watanabe
fea4870243 Update README.md 2021-01-01 18:45:00 +09:00
Kubernetes Prow Robot
f54df67d11 Merge pull request #471 from Mathew857/master
refactor: remove unused code
2020-12-15 17:20:20 -08:00
wu.chaozong
1a998037f8 refactor: remove unused param in e2e_test.go 2020-12-16 08:46:40 +08:00
wu.chaozong
c481877c03 refactor: update node_test file 2020-12-15 21:15:33 +08:00
Kubernetes Prow Robot
67df39690b Merge pull request #468 from eatwithforks/jylee/user
Run as user 1000 so pod can be runAsNonRoot for security purposes
2020-12-14 06:43:28 -08:00
wu.chaozong
674f14da78 refactor: remove unused code 2020-12-13 00:09:07 +08:00
Jye Lee
0cfbdf642b Run as user 1000 so pod can be runAsNonRoot for security purposes 2020-12-10 09:37:49 -08:00
4258 changed files with 595644 additions and 67955 deletions

.github/ci/ct.yaml

@@ -0,0 +1,6 @@
chart-dirs:
- charts
helm-extra-args: "--timeout=5m"
check-version-increment: false
helm-extra-set-args: "--set=kind=Deployment"
target-branch: master

.github/workflows/helm.yaml

@@ -0,0 +1,67 @@
name: Helm
on:
push:
branches:
- master
- release-*
paths:
- 'charts/**'
- '.github/workflows/helm.yaml'
pull_request:
paths:
- 'charts/**'
- '.github/workflows/helm.yaml'
jobs:
lint-and-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v2.1
with:
version: v3.9.2
- uses: actions/setup-python@v3.1.2
with:
python-version: 3.7
- uses: actions/setup-go@v3
with:
go-version: '1.19.0'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
with:
version: v3.7.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config=.github/ci/ct.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
run: ct lint --config=.github/ci/ct.yaml --validate-maintainers=false
# Need a multi node cluster so descheduler runs until evictions
- name: Create multi node Kind cluster
run: make kind-multi-node
# helm-extra-set-args only available after ct 3.6.0
- name: Run chart-testing (install)
run: ct install --config=.github/ci/ct.yaml
- name: E2E after chart install
env:
KUBERNETES_VERSION: "v1.25.0"
KIND_E2E: true
SKIP_INSTALL: true
run: make test-e2e

.github/workflows/security.yaml

@@ -0,0 +1,47 @@
name: "Security"
on:
push:
branches:
- main
- master
- release-*
schedule:
- cron: '30 1 * * 0'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build image
run: |
IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler}
IMAGE_TAG=${HELM_IMAGE_TAG:-security-test}
VERSION=security-test make image
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
image-ref: 'descheduler:security-test'
format: 'sarif'
exit-code: '0'
severity: 'CRITICAL,HIGH'
output: 'trivy-results.sarif'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v2
with:
sarif_file: 'trivy-results.sarif'
exit-code: '0'

.gitignore

@@ -3,3 +3,6 @@ _tmp/
vendordiff.patch
.idea/
*.code-workspace
.vscode/
kind
bin/

Dockerfile

@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.15.5
FROM golang:1.19.0
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
@@ -21,7 +21,9 @@ RUN VERSION=${VERSION} make build.$ARCH
FROM scratch
MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
USER 1000
COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler

Dockerfile.dev

@@ -13,7 +13,9 @@
# limitations under the License.
FROM scratch
MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
USER 1000
COPY _output/bin/descheduler /bin/descheduler

Makefile

@@ -14,16 +14,20 @@
.PHONY: test
export CONTAINER_ENGINE ?= docker
# VERSION is based on a date stamp plus the last commit
VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags --match "v*")
VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags)
BRANCH?=$(shell git branch --show-current)
SHA1?=$(shell git rev-parse HEAD)
BUILD=$(shell date +%FT%T%z)
LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
ARCHS = amd64 arm64
LDFLAG_LOCATION=sigs.k8s.io/descheduler/pkg/version
ARCHS = amd64 arm arm64
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD}"
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
GOLANGCI_VERSION := v1.30.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint)
GOLANGCI_VERSION := v1.49.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
# REGISTRY is the container registry to push
# into. The default is to push to the staging
@@ -41,7 +45,7 @@ IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
# In the future binaries can be uploaded to
# GCS bucket gs://k8s-staging-descheduler.
HAS_HELM := $(shell which helm)
HAS_HELM := $(shell which helm 2> /dev/null)
all: build
@@ -51,42 +55,52 @@ build:
build.amd64:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
build.arm:
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
build.arm64:
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
dev-image: build
docker build -f Dockerfile.dev -t $(IMAGE) .
$(CONTAINER_ENGINE) build -f Dockerfile.dev -t $(IMAGE) .
image:
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .
image.amd64:
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
image.arm:
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .
image.arm64:
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
push: image
gcloud auth configure-docker
docker tag $(IMAGE) $(IMAGE_GCLOUD)
docker push $(IMAGE_GCLOUD)
$(CONTAINER_ENGINE) tag $(IMAGE) $(IMAGE_GCLOUD)
$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)
push-all: image.amd64 image.arm64
push-all: image.amd64 image.arm image.arm64
gcloud auth configure-docker
for arch in $(ARCHS); do \
docker tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
docker push $(IMAGE_GCLOUD)-$${arch} ;\
$(CONTAINER_ENGINE) tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)-$${arch} ;\
done
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
for arch in $(ARCHS); do \
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
done
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(IMAGE_GCLOUD) ;\
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest push $(IMAGE_GCLOUD) ;\
clean:
rm -rf _output
rm -rf _tmp
verify: verify-gofmt verify-vendor lint lint-chart verify-spelling
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-toc verify-gen
verify-govet:
./hack/verify-govet.sh
verify-spelling:
./hack/verify-spelling.sh
@@ -97,6 +111,9 @@ verify-gofmt:
verify-vendor:
./hack/verify-vendor.sh
verify-toc:
./hack/verify-toc.sh
test-unit:
./test/run-unit-tests.sh
@@ -107,15 +124,37 @@ gen:
./hack/update-generated-conversions.sh
./hack/update-generated-deep-copies.sh
./hack/update-generated-defaulters.sh
./hack/update-toc.sh
verify-gen:
./hack/verify-conversions.sh
./hack/verify-deep-copies.sh
./hack/verify-defaulters.sh
lint:
ifndef HAS_GOLANGCI
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
endif
./_output/bin/golangci-lint run
lint-chart:
# helm
ensure-helm-install:
ifndef HAS_HELM
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
endif
lint-chart: ensure-helm-install
helm lint ./charts/descheduler
build-helm:
helm package ./charts/descheduler --dependency-update --destination ./bin/chart
test-helm: ensure-helm-install
./test/run-helm-tests.sh
kind-multi-node:
kind create cluster --name kind --config ./hack/kind_config.yaml --wait 2m
ct-helm:
./hack/verify-chart.sh
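The updated LDFLAGS above inject build metadata at link time via `-X` into `sigs.k8s.io/descheduler/pkg/version`. Below is a minimal sketch of the kind of package such flags target, reusing the variable names visible in the flags; the real package's contents may differ.

```go
// Package version holds build metadata injected at link time via
// -X sigs.k8s.io/descheduler/pkg/version.<name>=<value>.
// Any variable left unset at link time simply stays empty.
package version

import "fmt"

var (
	version   string // e.g. v20220914-v0.25.0
	buildDate string // e.g. 2022-09-14T09:03:00+0200
	gitbranch string // e.g. master
	gitsha1   string // full commit SHA
)

// Info is a printable summary of the injected values.
type Info struct {
	Version   string
	BuildDate string
	GitBranch string
	GitSha1   string
}

// Get returns the injected build metadata.
func Get() Info {
	return Info{Version: version, BuildDate: buildDate, GitBranch: gitbranch, GitSha1: gitsha1}
}

func (i Info) String() string {
	return fmt.Sprintf("%s (built %s, branch %s, sha %s)", i.Version, i.BuildDate, i.GitBranch, i.GitSha1)
}
```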

OWNERS

@@ -1,12 +1,16 @@
approvers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- damemi
- ingvagabund
- seanmalloy
- a7i
reviewers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- damemi
- seanmalloy
- ingvagabund
- lixiang233
- a7i
- janeliul
emeritus_approvers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla

README.md

@@ -1,4 +1,4 @@
[![Go Report Card](https://goreportcard.com/badge/kubernetes-sigs/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
![Release Charts](https://github.com/kubernetes-sigs/descheduler/workflows/Release%20Charts/badge.svg)
# Descheduler for Kubernetes
@@ -24,36 +24,45 @@ but relies on the default scheduler for that.
Table of Contents
=================
* [Quick Start](#quick-start)
* [Run As A Job](#run-as-a-job)
* [Run As A CronJob](#run-as-a-cronjob)
* [Install Using Helm](#install-using-helm)
* [Install Using Kustomize](#install-using-kustomize)
* [User Guide](#user-guide)
* [Policy and Strategies](#policy-and-strategies)
* [RemoveDuplicates](#removeduplicates)
* [LowNodeUtilization](#lownodeutilization)
* [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
* [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
* [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
* [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
* [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
* [PodLifeTime](#podlifetime)
* [Filter Pods](#filter-pods)
* [Namespace filtering](#namespace-filtering)
* [Priority filtering](#priority-filtering)
* [Pod Evictions](#pod-evictions)
* [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
* [Compatibility Matrix](#compatibility-matrix)
* [Getting Involved and Contributing](#getting-involved-and-contributing)
* [Communicating With Contributors](#communicating-with-contributors)
* [Roadmap](#roadmap)
* [Code of conduct](#code-of-conduct)
<!-- toc -->
- [Quick Start](#quick-start)
- [Run As A Job](#run-as-a-job)
- [Run As A CronJob](#run-as-a-cronjob)
- [Run As A Deployment](#run-as-a-deployment)
- [Install Using Helm](#install-using-helm)
- [Install Using Kustomize](#install-using-kustomize)
- [User Guide](#user-guide)
- [Policy and Strategies](#policy-and-strategies)
- [RemoveDuplicates](#removeduplicates)
- [LowNodeUtilization](#lownodeutilization)
- [HighNodeUtilization](#highnodeutilization)
- [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
- [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
- [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
- [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
- [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
- [PodLifeTime](#podlifetime)
- [RemoveFailedPods](#removefailedpods)
- [Filter Pods](#filter-pods)
- [Namespace filtering](#namespace-filtering)
- [Priority filtering](#priority-filtering)
- [Label filtering](#label-filtering)
- [Node Fit filtering](#node-fit-filtering)
- [Pod Evictions](#pod-evictions)
- [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
- [High Availability](#high-availability)
- [Configure HA Mode](#configure-ha-mode)
- [Metrics](#metrics)
- [Compatibility Matrix](#compatibility-matrix)
- [Getting Involved and Contributing](#getting-involved-and-contributing)
- [Communicating With Contributors](#communicating-with-contributors)
- [Roadmap](#roadmap)
- [Code of conduct](#code-of-conduct)
<!-- /toc -->
## Quick Start
The descheduler can be run as a Job or CronJob inside of a k8s cluster. It has the
The descheduler can be run as a `Job`, `CronJob`, or `Deployment` inside of a k8s cluster. It has the
advantage of being able to be run multiple times without needing user intervention.
The descheduler pod is run as a critical pod in the `kube-system` namespace to avoid
being evicted by itself or by the kubelet.
@@ -74,6 +83,14 @@ kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/cronjob/cronjob.yaml
```
### Run As A Deployment
```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/deployment/deployment.yaml
```
### Install Using Helm
Starting with release v0.18.0 there is an official helm chart that can be used to install the
@@ -84,16 +101,21 @@ The descheduler helm chart is also listed on the [artifact hub](https://artifact
### Install Using Kustomize
You can use kustomize to install descheduler.
See the [resources | Kustomize](https://kubernetes-sigs.github.io/kustomize/api-reference/kustomization/resources/) for detailed instructions.
See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/resource/) for detailed instructions.
Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=master' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.25.0' | kubectl apply -f -
```
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=master' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.25.0' | kubectl apply -f -
```
Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.25.0' | kubectl apply -f -
```
## User Guide
@@ -102,38 +124,55 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.
## Policy and Strategies
Descheduler's policy is configurable and includes strategies that can be enabled or disabled.
Eight strategies `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`,
`RemovePodsViolatingNodeAffinity`, `RemovePodsViolatingNodeTaints`, `RemovePodsViolatingTopologySpreadConstraint`,
`RemovePodsHavingTooManyRestarts`, and `PodLifeTime` are currently implemented. As part of the policy, the
parameters associated with the strategies can be configured too. By default, all strategies are enabled.
Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.
The policy also includes common configuration for all the strategies:
- `nodeSelector` - limiting the nodes which are processed
- `evictLocalStoragePods` - allowing to evict pods with local storage
- `maxNoOfPodsToEvictPerNode` - maximum number of pods evicted from each node (summed through all strategies)
The policy includes a common configuration that applies to all the strategies:
| Name | Default Value | Description |
|------|---------------|-------------|
| `nodeSelector` | `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `maxNoOfPodsToEvictPerNamespace` | `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
| `evictFailedBarePods` | `false` | allow eviction of pods without owner references and in failed phase |
As part of the policy, the parameters associated with each strategy can be configured.
See each strategy for details on available parameters.
**Policy:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
strategies:
...
```
The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.
![Strategies diagram](strategies_diagram.png)
### RemoveDuplicates
This strategy makes sure that there is only one pod associated with a Replica Set (RS),
Replication Controller (RC), Deployment, or Job running on the same node. If there are more,
This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
ReplicationController (RC), StatefulSet, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This issue could happen
if some nodes went down due to whatever reasons, and pods on them were moved to other nodes leading to
more than one pod associated with a RS or RC, for example, running on the same node. Once the failed nodes
are ready again, this strategy could be enabled to evict those duplicate pods.
It provides one optional parameter, `ExcludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
It provides one optional parameter, `excludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction. Note that
pods created by Deployments are considered for eviction by this strategy. The `excludeOwnerKinds` parameter
should include `ReplicaSet` to have pods created by Deployments excluded.
**Parameters:**
@@ -143,6 +182,7 @@ has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considere
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
@@ -164,17 +204,33 @@ in the hope that recreation of evicted pods will be scheduled on these underutil
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, and number of pods in terms of percentage. If a node's
usage is below threshold for all (cpu, memory, and number of pods), the node is considered underutilized.
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage (the percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node).
If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods request resource requirements are considered for computing node resource utilization.
There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, or number of pods),
from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, number of pods, or extended resources),
the node is considered over utilized. Any node between the thresholds, `thresholds` and `targetThresholds` is
considered appropriately utilized and is not considered for eviction. The threshold, `targetThresholds`,
can be configured for cpu, memory, and number of pods too in terms of percentage.
These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
These thresholds, `thresholds` and `targetThresholds`, can be tuned as per your cluster requirements. Note that this
strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
(those with usage below `thresholds`); it aborts if the number of either `underutilized nodes` or `overutilized nodes` is zero.
Additionally, the strategy accepts a `useDeviationThresholds` parameter.
If that parameter is set to `true`, the thresholds are treated as percentage deviations from the mean resource usage:
`thresholds` is deducted from the mean across all nodes and `targetThresholds` is added to the mean.
Resource consumption above this window is considered overutilization and consumption below it underutilization.
For example, with a mean CPU usage of 50% across all nodes, a `thresholds` value of 10 and a `targetThresholds` value of 10 for cpu define a window of 40% to 60%.
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
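As a small worked sketch of the percentage described above (an assumed helper for illustration, not descheduler code): utilization is the sum of pod requests on the node divided by the node's allocatable capacity, times 100.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// requestedPercentage returns requested/allocatable as a percentage,
// mirroring the calculation described above (pod requests, not actual usage).
func requestedPercentage(requested, allocatable resource.Quantity) float64 {
	if allocatable.IsZero() {
		return 0
	}
	return float64(requested.MilliValue()) / float64(allocatable.MilliValue()) * 100
}

func main() {
	// Example: a node with 4 CPU allocatable whose pods request 2 CPU in total
	// is at 50% CPU for the purposes of thresholds/targetThresholds.
	requested := resource.MustParse("2")
	allocatable := resource.MustParse("4")
	fmt.Printf("cpu: %.0f%%\n", requestedPercentage(requested, allocatable))
}
```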
**Parameters:**
@@ -183,8 +239,10 @@ These thresholds, `thresholds` and `targetThresholds`, could be tuned as per you
|`thresholds`|map(string:int)|
|`targetThresholds`|map(string:int)|
|`numberOfNodes`|int|
|`useDeviationThresholds`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
@@ -207,19 +265,84 @@ strategies:
```
Policy should pass the following validation checks:
* Only three types of resources are supported: `cpu`, `memory` and `pods`.
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`.
If any of these resource types is not specified, all its thresholds default to 100% to avoid nodes going from underutilized to overutilized.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional,
and will not be used to compute node's usage if it's not specified in `thresholds` and `targetThresholds` explicitly.
* `thresholds` or `targetThresholds` can not be nil and they must configure exactly the same types of resources.
* The valid range of the resource's percentage value is \[0, 100\]
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.
If any of the resource types is not specified, all its thresholds default to 100% to avoid nodes going
from underutilized to overutilized.
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
are above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
### HighNodeUtilization
This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
trigger down scaling of under utilized nodes.
This strategy **must** be used with the scheduler scoring strategy `MostAllocated`. The parameters of this strategy are
configured under `nodeResourceUtilizationThresholds`.
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.
If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods request resource requirements are considered for computing node resource utilization.
Any node above `thresholds` is considered appropriately utilized and is not considered for eviction.
The `thresholds` param could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `underutilized nodes` (those with usage below `thresholds`)
so that they can be recreated in appropriately utilized nodes.
The strategy aborts if the number of either `underutilized nodes` or `appropriately utilized nodes` is zero.
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
**Parameters:**
|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
```
Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`. If any of these resource types is not specified, all its thresholds default to 100%.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional, and will not be used to compute node's usage if it's not specified in `thresholds` explicitly.
* `thresholds` can not be nil.
* The valid range of the resource's percentage value is \[0, 100\]
There is another parameter associated with the `HighNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
### RemovePodsViolatingInterPodAntiAffinity
This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
@@ -235,6 +358,8 @@ node.
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
@@ -272,6 +397,8 @@ podA gets evicted from nodeA.
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
@@ -293,13 +420,22 @@ pod "podA" with a toleration to tolerate a taint ``key=value:NoSchedule`` schedu
node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations
and will be evicted.
Node taints can be excluded from consideration by specifying a list of excludedTaints. If a node taint key **or**
key=value matches an excludedTaints entry, the taint will be ignored.
For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".
**Parameters:**
|Name|Type|
|---|---|
|`excludedTaints`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
@@ -309,6 +445,10 @@ kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingNodeTaints":
enabled: true
params:
excludedTaints:
- dedicated=special-user # exclude taints with key "dedicated" and value "special-user"
- reserved # exclude all taints with key "reserved"
````
### RemovePodsViolatingTopologySpreadConstraint
@@ -317,13 +457,21 @@ This strategy makes sure that pods violating [topology spread constraints](https
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.
By default, this strategy only deals with hard constraints, setting parameter `includeSoftConstraints` to `true` will
include soft constraints.
Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.
**Parameters:**
|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
@@ -333,15 +481,18 @@ kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: false
```
### RemovePodsHavingTooManyRestarts
This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that
can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters
include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`,
which determines whether init container restarts should be factored into that calculation.
This strategy makes sure that pods having too many restarts are removed from nodes. For example, if a pod with an EBS/PD volume
cannot get the volume/disk attached to the instance, the pod should be rescheduled to other nodes. Its parameters
include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.
**Parameters:**
@@ -352,6 +503,8 @@ which determines whether init container restarts should be factored into that ca
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
@@ -371,18 +524,24 @@ strategies:
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
You can also specify `podStatusPhases` to `only` evict pods with specific `StatusPhases`, currently this parameter is limited
to `Running` and `Pending`.
You can also specify `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`
If a value for `states` or `podStatusPhases` is not specified,
Pods in any state (even `Running`) are considered for eviction.
**Parameters:**
|Name|Type|
|---|---|
|`maxPodLifeTimeSeconds`|int|
|`podStatusPhases`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|Name|Type|Notes|
|---|---|---|
|`maxPodLifeTimeSeconds`|int||
|`podStatusPhases`|list(string)|Deprecated in v0.25+; use `states` instead|
|`states`|list(string)|Only supported in v0.25+|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))||
|`namespaces`|(see [namespace filtering](#namespace-filtering))||
|`labelSelector`|(see [label filtering](#label-filtering))||
**Example:**
@@ -395,8 +554,50 @@ strategies:
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
podStatusPhases:
states:
- "Pending"
- "PodInitializing"
```
### RemoveFailedPods
This strategy evicts pods that are in failed status phase.
You can provide an optional parameter to filter by failed `reasons`.
`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can specify an optional parameter `minPodLifetimeSeconds` to evict pods that are older than specified seconds.
Lastly, you can specify the optional parameter `excludeOwnerKinds` and if a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
**Parameters:**
|Name|Type|
|---|---|
|`minPodLifetimeSeconds`|uint|
|`excludeOwnerKinds`|list(string)|
|`reasons`|list(string)|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "NodeAffinity"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600
```
## Filter Pods
@@ -411,6 +612,7 @@ The following strategies accept a `namespaces` parameter which allows to specify
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemoveDuplicates`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
For example:
All strategies are able to configure a priority threshold; only pods under the threshold can be evicted. You can
specify this threshold by setting the `thresholdPriorityClassName` (setting the threshold to the value of the given
priority class) or `thresholdPriority` (directly setting the threshold) parameters. By default, this threshold
is set to the value of the `system-cluster-critical` priority class.
Note: Setting `evictSystemCriticalPods` to true disables priority filtering entirely.
E.g.
Setting `thresholdPriority`
@@ -488,20 +693,105 @@ strategies:
Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`. If the given priority class
does not exist, the descheduler won't create it and will throw an error.
### Label filtering
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#labelselector-v1-meta)
to filter pods by their labels:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
This allows restricting the strategies to the subset of pods the descheduler is interested in.
For example:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
labelSelector:
matchLabels:
component: redis
matchExpressions:
- {key: tier, operator: In, values: [cache]}
- {key: environment, operator: NotIn, values: [dev]}
```
### Node Fit filtering
The following strategies accept a `nodeFit` boolean parameter which can optimize descheduling:
* `RemoveDuplicates`
* `LowNodeUtilization`
* `HighNodeUtilization`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemovePodsHavingTooManyRestarts`
* `RemoveFailedPods`
If set to `true` the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently the following criteria are considered when setting `nodeFit` to `true`:
- A `nodeSelector` on the pod
- Any `tolerations` on the pod and any `taints` on the other nodes
- `nodeAffinity` on the pod
- Resource `requests` made by the pod and the resources available on other nodes
- Whether any of the other nodes are marked as `unschedulable`
E.g.
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeFit: true
nodeResourceUtilizationThresholds:
thresholds:
"cpu": 20
"memory": 20
"pods": 20
targetThresholds:
"cpu": 50
"memory": 50
"pods": 50
```
Note that node fit filtering references the current pod spec, and not that of its owner.
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
This is expected behavior as the descheduler is a "best-effort" mechanism.
Using Deployments instead of ReplicationControllers provides an automated rollout of pod spec changes, therefore ensuring that the descheduler has an up-to-date view of the cluster state.
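For illustration, the sketch below shows a simplified fit check of the kind the criteria above describe. It is not the descheduler's actual implementation: it only covers the `nodeSelector`, the `unschedulable` flag, and CPU/memory requests, and omits taints/tolerations and `nodeAffinity` for brevity.

```go
// Package nodefit sketches a simplified nodeFit-style predicate.
package nodefit

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// fitsOnNode checks the pod's nodeSelector, the node's unschedulable flag,
// and CPU/memory requests against the node's remaining allocatable capacity.
// requestedOnNode is the sum of requests already placed on the node.
func fitsOnNode(pod *v1.Pod, node *v1.Node, requestedOnNode v1.ResourceList) bool {
	if node.Spec.Unschedulable {
		return false
	}
	if len(pod.Spec.NodeSelector) > 0 &&
		!labels.SelectorFromSet(pod.Spec.NodeSelector).Matches(labels.Set(node.Labels)) {
		return false
	}
	// Sum the pod's container requests.
	podCPU, podMem := int64(0), int64(0)
	for _, c := range pod.Spec.Containers {
		podCPU += c.Resources.Requests.Cpu().MilliValue()
		podMem += c.Resources.Requests.Memory().Value()
	}
	freeCPU := node.Status.Allocatable.Cpu().MilliValue() - requestedOnNode.Cpu().MilliValue()
	freeMem := node.Status.Allocatable.Memory().Value() - requestedOnNode.Memory().Value()
	return podCPU <= freeCPU && podMem <= freeMem
}
```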
## Pod Evictions
When the descheduler decides to evict pods from a node, it employs the following general mechanism:
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted.
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Job are
never evicted because these pods won't be recreated.
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
* Pods (static or mirrored pods or standalone pods) not part of a ReplicationController, ReplicaSet (Deployment), StatefulSet, or Job are
never evicted because these pods won't be recreated. (Standalone pods in the failed status phase can be evicted by setting `evictFailedBarePods: true`.)
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set)
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are evicted. This
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
Users should know how and if the pod will be recreated.
* Pods with a non-nil DeletionTimestamp are not evicted by default.
Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.
@@ -510,6 +800,33 @@ Setting `--v=4` or greater on the Descheduler will log all reasons why any pod i
Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods
are evicted by using the eviction subresource to handle PDB.
## High Availability
In High Availability mode, the descheduler starts a [leader election](https://github.com/kubernetes/client-go/tree/master/tools/leaderelection) process in Kubernetes. You can activate HA mode
if you choose to deploy the descheduler as a Deployment.
The Deployment starts with 1 replica by default. If you want to run more than 1 replica, you must
enable High Availability mode, since multiple descheduler pods should not run their strategies simultaneously.
### Configure HA Mode
The leader election process can be enabled by setting `--leader-elect` in the CLI. You can also pass the
`--set=leaderElection.enabled=true` flag if you are using Helm.
To get the best results from HA mode, some additional configuration may be required:
* Configure a [podAntiAffinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node) rule if you want to schedule onto a node only if that node is in the same zone as at least one already-running descheduler
* Set the replica count greater than 1
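For reference, a minimal client-go leader-election sketch of the mechanism HA mode relies on (illustrative only; the real descheduler's flag handling and wiring differ, and the namespace, lock name, and identity below are assumptions):

```go
package main

import (
	"context"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// A Lease lock in kube-system; names here are illustrative assumptions.
	lock, err := resourcelock.New(resourcelock.LeasesResourceLock,
		"kube-system", "descheduler-example-lock",
		client.CoreV1(), client.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: "descheduler-pod-example"})
	if err != nil {
		klog.Fatal(err)
	}

	leaderelection.RunOrDie(context.Background(), leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// Only the leader runs the descheduling loop.
				klog.Info("became leader; running strategies")
				<-ctx.Done()
			},
			OnStoppedLeading: func() {
				klog.Info("lost leadership; exiting")
			},
		},
	})
}
```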
## Metrics
| name | type | description |
|-------|-------|----------------|
| build_info | gauge | constant 1 |
| pods_evicted | CounterVec | total number of pods evicted |
The metrics are served through https://localhost:10258/metrics by default.
The address and port can be changed by setting `--binding-address` and `--secure-port` flags.
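A minimal sketch of registering metrics with the shapes listed above, using plain client_golang (the project may wire these through component-base wrappers instead; label names here follow the table and the "Register metrics" commit earlier in this log and are otherwise assumptions):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	// build_info: constant 1, with build metadata carried in labels.
	buildInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "build_info",
		Help: "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch",
	}, []string{"goversion", "version", "gitsha1", "gitbranch"})

	// pods_evicted: counter by result, strategy and namespace.
	podsEvicted = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "pods_evicted",
		Help: "Number of successfully evicted pods, by result, strategy and namespace",
	}, []string{"result", "strategy", "namespace"})
)

func main() {
	prometheus.MustRegister(buildInfo, podsEvicted)
	buildInfo.WithLabelValues("go1.19.0", "v0.25.0", "<sha>", "master").Set(1)
	podsEvicted.WithLabelValues("success", "PodLifeTime", "default").Inc()

	// The real descheduler serves these securely on :10258; this sketch uses plain HTTP.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":10258", nil)
}
```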
## Compatibility Matrix
The below compatibility matrix shows the k8s client package (client-go, apimachinery, etc.) versions that descheduler
is compiled with. At this time descheduler does not have a hard dependency on a specific k8s release. However a
@@ -519,15 +836,19 @@ v0.18 should work with k8s v1.18, v1.17, and v1.16.
Starting with descheduler release v0.18 the minor version of descheduler matches the minor version of the k8s client
packages that it is compiled with.
Descheduler | Supported Kubernetes Version
-------------|-----------------------------
v0.20 | v1.20
v0.19 | v1.19
v0.18 | v1.18
v0.10 | v1.17
v0.4-v0.9 | v1.9+
v0.1-v0.3 | v1.7-v1.8
| Descheduler | Supported Kubernetes Version |
|-------------|------------------------------|
| v0.25 | v1.25 |
| v0.24 | v1.24 |
| v0.23 | v1.23 |
| v0.22 | v1.22 |
| v0.21 | v1.21 |
| v0.20 | v1.20 |
| v0.19 | v1.19 |
| v0.18 | v1.18 |
| v0.10 | v1.17 |
| v0.4-v0.9 | v1.9+ |
| v0.1-v0.3 | v1.7-v1.8 |
## Getting Involved and Contributing

charts/descheduler/Chart.yaml

@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.20.0
appVersion: 0.20.0
version: 0.25.0
appVersion: 0.25.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes

View File

@@ -43,23 +43,45 @@ The command removes all the Kubernetes components associated with the chart and
The following table lists the configurable parameters of the _descheduler_ chart and their default values.
| Parameter | Description | Default |
| ------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
| `resources.cpuRequest` | Descheduler container CPU request | `500m` |
| `resources.memoryRequest` | Descheduler container memory request | `256Mi` |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| Parameter | Description | Default |
|-------------------------------------|-----------------------------------------------------------------------------------------------------------------------|--------------------------------------|
| `kind` | Use as CronJob or Deployment | `CronJob` |
| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels`        | Keeps the scraped data's labels when they collide with target labels                                                   | `true`                               |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
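As a non-authoritative example that combines a few of the parameters above (file name and values are illustrative), an override file passed to `helm install -f` could look like:

```
# my-values.yaml (illustrative)
kind: CronJob
schedule: "*/10 * * * *"
cmdOptions:
  v: 3
deschedulerPolicy:
  strategies:
    RemoveDuplicates:
      enabled: true
```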

View File

@@ -1 +1,7 @@
Descheduler installed as a cron job.
Descheduler installed as a {{ .Values.kind }}.
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.replicas 1.0}}
WARNING: You set the replica count to 1 and the workload kind to Deployment, but leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- end}}

View File

@@ -42,6 +42,17 @@ app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "descheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
@@ -54,3 +65,30 @@ Create the name of the service account to use
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Leader Election
*/}}
{{- define "descheduler.leaderElection"}}
{{- if .Values.leaderElection -}}
- --leader-elect={{ .Values.leaderElection.enabled }}
{{- if .Values.leaderElection.leaseDuration }}
- --leader-elect-lease-duration={{ .Values.leaderElection.leaseDuration }}
{{- end }}
{{- if .Values.leaderElection.renewDeadline }}
- --leader-elect-renew-deadline={{ .Values.leaderElection.renewDeadline }}
{{- end }}
{{- if .Values.leaderElection.retryPeriod }}
- --leader-elect-retry-period={{ .Values.leaderElection.retryPeriod }}
{{- end }}
{{- if .Values.leaderElection.resourceLock }}
- --leader-elect-resource-lock={{ .Values.leaderElection.resourceLock }}
{{- end }}
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{- if .Values.leaderElection.resourceNamespace }}
- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamespace }}
{{- end -}}
{{- end }}
{{- end }}

View File

@@ -14,7 +14,7 @@ rules:
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
@@ -24,11 +24,13 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
{{- if .Values.podSecurityPolicy.create }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "descheduler.fullname" . }}
{{- if .Values.leaderElection.enabled }}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
verbs: ["get", "patch", "delete"]
{{- end }}
{{- end -}}

View File

@@ -2,6 +2,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
data:

View File

@@ -1,11 +1,16 @@
apiVersion: batch/v1beta1
{{- if eq .Values.kind "CronJob" }}
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
schedule: {{ .Values.schedule | quote }}
{{- if .Values.suspend }}
suspend: {{ .Values.suspend }}
{{- end }}
concurrencyPolicy: "Forbid"
{{- if .Values.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
@@ -27,17 +32,32 @@ spec:
{{- .Values.podAnnotations | toYaml | nindent 12 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "descheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "descheduler.selectorLabels" . | nindent 12 }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 12 }}
{{- end }}
spec:
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
restartPolicy: "Never"
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
@@ -53,8 +73,18 @@ spec:
- {{ $value | quote }}
{{- end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
@@ -62,3 +92,4 @@ spec:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- end }}

View File

@@ -0,0 +1,94 @@
{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
{{- if gt .Values.replicas 1.0}}
{{- if not .Values.leaderElection.enabled }}
{{- fail "You must set leaderElection to use more than 1 replica"}}
{{- end}}
replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
{{- else }}
replicas: 1
{{- end }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "descheduler.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{- .Values.podAnnotations | toYaml | nindent 8 }}
{{- end }}
spec:
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
ports:
- containerPort: 10258
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -1,38 +0,0 @@
{{- if .Values.podSecurityPolicy.create -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "descheduler.fullname" . }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'secret'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: true
{{- end -}}

View File

@@ -0,0 +1,21 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.service.enabled true }}
apiVersion: v1
kind: Service
metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
clusterIP: None
ports:
- name: http-metrics
port: 10258
protocol: TCP
targetPort: 10258
selector:
{{- include "descheduler.selectorLabels" . | nindent 4 }}
type: ClusterIP
{{- end }}
{{- end }}

View File

@@ -3,6 +3,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,41 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.serviceMonitor.enabled true }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "descheduler.fullname" . }}-servicemonitor
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
endpoints:
- honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
port: http-metrics
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
scheme: https
tlsConfig:
{{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
insecureSkipVerify: true
{{- end }}
{{- if .Values.serviceMonitor.serverName }}
serverName: {{ .Values.serviceMonitor.serverName }}
{{- end}}
{{- if .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -2,12 +2,18 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# CronJob or Deployment
kind: CronJob
image:
repository: k8s.gcr.io/descheduler/descheduler
# Overrides the image tag whose default is the chart version
tag: ""
pullPolicy: IfNotPresent
imagePullSecrets:
# - name: container-registry-secret
resources:
requests:
cpu: 500m
@@ -19,30 +25,67 @@ resources:
nameOverride: ""
fullnameOverride: ""
# labels that'll be applied to all resources
commonLabels: {}
cronJobApiVersion: "batch/v1"
schedule: "*/2 * * * *"
#startingDeadlineSeconds: 200
#successfulJobsHistoryLimit: 1
#failedJobsHistoryLimit: 1
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 1
# failedJobsHistoryLimit: 1
# Required when running as a Deployment
deschedulingInterval: 5m
# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
# only if that node is in the same zone as at least one already-running descheduler
replicas: 1
# Specifies whether Leader Election resources should be created
# Required when running as a Deployment
leaderElection: {}
# enabled: true
# leaseDuration: 15s
# renewDeadline: 10s
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
# resourceNamespace: "kube-system"
cmdOptions:
v: 3
# evict-local-storage-pods:
# max-pods-to-evict-per-node: 10
# node-selector: "key1=value1,key2=value2"
deschedulerPolicy:
# nodeSelector: "key1=value1,key2=value2"
# maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
# ignorePvcPods: true
# evictLocalStoragePods: true
strategies:
RemoveDuplicates:
enabled: true
RemovePodsHavingTooManyRestarts:
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
RemovePodsViolatingNodeTaints:
enabled: true
RemovePodsViolatingNodeAffinity:
enabled: true
params:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
RemovePodsViolatingInterPodAntiAffinity:
enabled: true
RemovePodsViolatingTopologySpreadConstraint:
enabled: true
params:
includeSoftConstraints: false
LowNodeUtilization:
enabled: true
params:
@@ -58,17 +101,79 @@ deschedulerPolicy:
priorityClassName: system-cluster-critical
nodeSelector: {}
# foo: bar
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - descheduler
# topologyKey: "kubernetes.io/hostname"
tolerations: []
# - key: 'management'
# operator: 'Equal'
# value: 'tool'
# effect: 'NoSchedule'
rbac:
# Specifies whether RBAC resources should be created
create: true
podSecurityPolicy:
# Specifies whether PodSecurityPolicy should be created.
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Specifies custom annotations for the serviceAccount
annotations: {}
podAnnotations: {}
podLabels: {}
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
service:
enabled: false
serviceMonitor:
enabled: false
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
interval: ""
# honorLabels: true
insecureSkipVerify: true
serverName: null
metricRelabelings: []
# - action: keep
# regex: 'descheduler_(build_info|pods_evicted)'
# sourceLabels: [__name__]
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace

View File

@@ -1,13 +1,13 @@
# See https://cloud.google.com/cloud-build/docs/build-config
# this must be specified in seconds. If omitted, defaults to 600s (10 mins)
timeout: 1200s
timeout: 1500s
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
# or any new substitutions added in the future.
options:
substitution_option: ALLOW_LOOSE
steps:
- name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4'
- name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90'
entrypoint: make
env:
- DOCKER_CLI_EXPERIMENTAL=enabled

View File

@@ -18,21 +18,31 @@ limitations under the License.
package options
import (
"github.com/spf13/pflag"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"time"
"k8s.io/component-base/logs"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
clientset "k8s.io/client-go/kubernetes"
componentbaseconfig "k8s.io/component-base/config"
componentbaseoptions "k8s.io/component-base/config/options"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
)
const (
DefaultDeschedulerPort = 10258
)
// DeschedulerServer configuration
type DeschedulerServer struct {
componentconfig.DeschedulerConfiguration
Client clientset.Interface
Logs *logs.Options
Client clientset.Interface
EventClient clientset.Interface
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
DisableMetrics bool
}
// NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -41,21 +51,28 @@ func NewDeschedulerServer() (*DeschedulerServer, error) {
if err != nil {
return nil, err
}
secureServing := apiserveroptions.NewSecureServingOptions().WithLoopback()
secureServing.BindPort = DefaultDeschedulerPort
return &DeschedulerServer{
DeschedulerConfiguration: *cfg,
Logs: logs.NewOptions(),
SecureServing: secureServing,
}, nil
}
// Validation checks for DeschedulerServer.
func (s *DeschedulerServer) Validate() error {
var errs []error
errs = append(errs, s.Logs.Validate()...)
return utilerrors.NewAggregate(errs)
}
func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
versionedCfg := v1alpha1.DeschedulerConfiguration{}
versionedCfg := v1alpha1.DeschedulerConfiguration{
LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
LeaderElect: false,
LeaseDuration: metav1.Duration{Duration: 137 * time.Second},
RenewDeadline: metav1.Duration{Duration: 107 * time.Second},
RetryPeriod: metav1.Duration{Duration: 26 * time.Second},
ResourceLock: "leases",
ResourceName: "descheduler",
ResourceNamespace: "kube-system",
},
}
deschedulerscheme.Scheme.Default(&versionedCfg)
cfg := componentconfig.DeschedulerConfiguration{}
if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
@@ -66,15 +83,14 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.Logging.Format, "logging-format", rs.Logging.Format, `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler")
// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "DEPRECATED: enables evicting pods using local storage by descheduler")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
rs.SecureServing.AddFlags(fs)
}

View File

@@ -18,15 +18,24 @@ limitations under the License.
package app
import (
"flag"
"context"
"io"
"os/signal"
"syscall"
"k8s.io/apiserver/pkg/server/healthz"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
"github.com/spf13/cobra"
aflag "k8s.io/component-base/cli/flag"
apiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
restclient "k8s.io/client-go/rest"
registry "k8s.io/component-base/logs/api/v1"
_ "k8s.io/component-base/logs/json/register"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
)
@@ -43,26 +52,58 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
Short: "descheduler",
Long: `The descheduler evicts pods which may be bound to less desired nodes`,
Run: func(cmd *cobra.Command, args []string) {
s.Logs.LogFormat = s.Logging.Format
s.Logs.Apply()
// s.Logs.Config.Format = s.Logging.Format
if err := s.Validate(); err != nil {
klog.ErrorS(err, "failed to validate server configuration")
// LoopbackClientConfig is a config for a privileged loopback connection
var LoopbackClientConfig *restclient.Config
var SecureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return
}
err := Run(s)
var factory registry.LogFormatFactory
if factory == nil {
klog.ClearLogger()
} else {
log, logrFlush := factory.Create(registry.LoggingConfiguration{
Format: s.Logging.Format,
})
defer logrFlush()
klog.SetLogger(log)
}
ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !s.DisableMetrics {
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
}
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return
}
err = Run(ctx, s)
if err != nil {
klog.ErrorS(err, "descheduler server")
}
done()
// wait for metrics server to close
<-stoppedCh
},
}
cmd.SetOut(out)
flags := cmd.Flags()
flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
flags.AddGoFlagSet(flag.CommandLine)
s.AddFlags(flags)
return cmd
}
func Run(rs *options.DeschedulerServer) error {
return descheduler.Run(rs)
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return descheduler.Run(ctx, rs)
}

View File

@@ -18,77 +18,19 @@ package app
import (
"fmt"
"regexp"
"runtime"
"strings"
"github.com/spf13/cobra"
"sigs.k8s.io/descheduler/pkg/version"
)
var (
// version is a constant representing the version tag that
// generated this build. It should be set during build via -ldflags.
version string
// buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
//It should be set during build via -ldflags.
buildDate string
)
// Info holds the information related to descheduler app version.
type Info struct {
Major string `json:"major"`
Minor string `json:"minor"`
GitVersion string `json:"gitVersion"`
BuildDate string `json:"buildDate"`
GoVersion string `json:"goVersion"`
Compiler string `json:"compiler"`
Platform string `json:"platform"`
}
// Get returns the overall codebase version. It's for detecting
// what code a binary was built from.
func Get() Info {
majorVersion, minorVersion := splitVersion(version)
return Info{
Major: majorVersion,
Minor: minorVersion,
GitVersion: version,
BuildDate: buildDate,
GoVersion: runtime.Version(),
Compiler: runtime.Compiler,
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
}
}
func NewVersionCommand() *cobra.Command {
var versionCmd = &cobra.Command{
Use: "version",
Short: "Version of descheduler",
Long: `Prints the version of descheduler.`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Descheduler version %+v\n", Get())
fmt.Printf("Descheduler version %+v\n", version.Get())
},
}
return versionCmd
}
// splitVersion splits the git version to generate major and minor versions needed.
func splitVersion(version string) (string, string) {
if version == "" {
return "", ""
}
// Version from an automated container build environment for a tag. For example v20200521-v0.18.0.
m1, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+$`, version)
// Version from an automated container build environment(not a tag) or a local build. For example v20201009-v0.18.0-46-g939c1c0.
m2, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+-\w+-\w+$`, version)
if m1 || m2 {
semVer := strings.Split(version, "-")[1]
return strings.Trim(strings.Split(semVer, ".")[0], "v"), strings.Split(semVer, ".")[1] + "+"
}
// Something went wrong
return "", ""
}

View File

@@ -17,22 +17,23 @@ limitations under the License.
package main
import (
"fmt"
"k8s.io/component-base/logs"
"os"
"k8s.io/component-base/cli"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
)
func init() {
klog.SetOutput(os.Stdout)
klog.InitFlags(nil)
}
func main() {
out := os.Stdout
cmd := app.NewDeschedulerCommand(out)
cmd.AddCommand(app.NewVersionCommand())
logs.InitLogs()
defer logs.FlushLogs()
if err := cmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
code := cli.Run(cmd)
os.Exit(code)
}

View File

@@ -3,10 +3,10 @@
## Required Tools
- [Git](https://git-scm.com/downloads)
- [Go 1.15+](https://golang.org/dl/)
- [Go 1.16+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
- [kind v0.9.0+](https://kind.sigs.k8s.io/)
- [kind v0.10.0+](https://kind.sigs.k8s.io/)
## Build and Run
@@ -31,7 +31,7 @@ View all CLI options.
## Run Tests
```
GOOS=linux make dev-image
kind create cluster --config hack/kind_config.yaml
make kind-multi-node
kind load docker-image <image name>
kind get kubeconfig > /tmp/admin.conf
export KUBECONFIG=/tmp/admin.conf
@@ -39,5 +39,32 @@ make test-unit
make test-e2e
```
## Build Helm Package locally
If you made some changes in the chart, and just want to check if templating is ok, or if the chart is buildable, you can run this command to have a package built from the `./charts` directory.
```
make build-helm
```
## Lint Helm Chart locally
To check linting of your changes in the helm chart locally you can run:
```
make lint-helm
```
## Test helm changes locally with kind and ct
You will need kind and docker (or equivalent) installed. We can use the public `ct` image to avoid installing `ct` and all its dependencies.
```
make kind-multi-node
make ct-helm
```
### Miscellaneous
See the [hack directory](https://github.com/kubernetes-sigs/descheduler/tree/master/hack) for additional tools and scripts used for developing the descheduler.

16
docs/proposals.md Normal file
View File

@@ -0,0 +1,16 @@
# Proposals
This document walks you through all the enhancement proposals for the descheduler.
## Descheduler v1alpha2 Design Proposal
```yaml
title: Descheduler v1alpha2 Design Proposal
authors:
- "@damemi"
link:
- https://docs.google.com/document/d/1S1JCh-0F-QCJvBBG-kbmXiHAJFF8doArhDIAKbOj93I/edit#heading=h.imbp1ctnc8lx
- https://github.com/kubernetes-sigs/descheduler/issues/679
owning-sig: sig-scheduling
creation-date: 2021-05-01
status: implementable
```

View File

@@ -1,36 +1,82 @@
# Release Guide
## Container Image
The process for publishing each Descheduler release includes a mixture of manual and automatic steps. Over
time, it would be good to automate as much of this process as possible. However, due to current limitations,
care must be taken to perform each manual step precisely so that the automated steps execute properly.
### Semi-automatic
## Pre-release Code Changes
1. Make sure your repo is clean by git's standards
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Publish a draft release using the tag you just created
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
7. Publish release
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
Before publishing each release, the following code updates must be made:
### Manual
- [ ] (Optional, but recommended) Bump `k8s.io` dependencies to the `-rc` tags. These tags are usually published around upstream code freeze. [Example](https://github.com/kubernetes-sigs/descheduler/pull/539)
- [ ] Bump `k8s.io` dependencies to GA tags once they are published (following the upstream release). [Example](https://github.com/kubernetes-sigs/descheduler/pull/615)
- [ ] Ensure that Go is updated to the same version as upstream. [Example](https://github.com/kubernetes-sigs/descheduler/pull/801)
- [ ] Make CI changes in [github.com/kubernetes/test-infra](https://github.com/kubernetes/test-infra) to add the new version's tests (note, this may also include a Go bump). [Example](https://github.com/kubernetes/test-infra/pull/25833)
- [ ] Update local CI versions for utils (such as golang-ci), kind, and go. [Example - e2e](https://github.com/kubernetes-sigs/descheduler/commit/ac4d576df8831c0c399ee8fff1e85469e90b8c44), [Example - helm](https://github.com/kubernetes-sigs/descheduler/pull/821)
- [ ] Update version references in docs and Readme. [Example](https://github.com/kubernetes-sigs/descheduler/pull/617)
1. Make sure your repo is clean by git's standards
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
6. Build and push the container image to the staging registry `VERSION=$VERSION make push-all`
7. Publish a draft release using the tag you just created
8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
9. Publish release
10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
## Release Process
### Notes
It's important to create the tag on the master branch after creating the release-* branch so that the [Helm releaser-action](#helm-chart) can work.
It compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
will be nothing to compare and it will fail (creating a new release branch usually involves no code changes). For this same reason, you should
also tag patch releases (on the release-* branch) *after* pushing changes (if those changes involve bumping the Helm chart version).
When the above pre-release steps are complete and the release is ready to be cut, perform the following steps **in order**
(the flowchart below demonstrates these steps):
**Version release**
1. Create the `git tag` on `master` for the release, eg `v0.24.0`
2. Merge Helm chart version update to `master` (see [Helm chart](#helm-chart) below). [Example](https://github.com/kubernetes-sigs/descheduler/pull/709)
3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
4. Cut release branch from `master`, eg `release-1.24`
5. Publish release using Github's release process from the git tag you created
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
**Patch release**
1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
2. Create the patch tag on the release branch, eg `v0.24.1` on `release-1.24`
3. Merge Helm chart version update to release branch
4. Perform the image promotion process for the patch version
5. Publish release using Github's release process from the git tag you created
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
### Flowchart
![Flowchart for major and patch releases](release-process.png)
### Image promotion process
Every merge to any branch triggers an [image build and push](https://github.com/kubernetes/test-infra/blob/c36b8e5/config/jobs/image-pushing/k8s-staging-descheduler.yaml) to a `gcr.io` repository.
These automated image builds are snapshots of the code in place at the time of every PR merge and
tagged with the latest git SHA at the time of the build. To create a final release image, the desired
auto-built image SHA is added to a [file upstream](https://github.com/kubernetes/k8s.io/blob/e9e971c/k8s.gcr.io/images/k8s-staging-descheduler/images.yaml) which
copies that image to a public registry.
Automatic builds can be monitored and re-triggered with the [`post-descheduler-push-images` job](https://prow.k8s.io/?job=post-descheduler-push-images) on prow.k8s.io.
Note that images can also be manually built and pushed using `VERSION=$VERSION make push-all` by [users with access](https://github.com/kubernetes/k8s.io/blob/fbee8f67b70304241e613a672c625ad972998ad7/groups/sig-scheduling/groups.yaml#L33-L43).
## Helm Chart
We currently use the [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action) to automatically
publish [Helm chart releases](https://github.com/kubernetes-sigs/descheduler/blob/022e07c/.github/workflows/release.yaml).
This action is triggered when it detects any changes to [`Chart.yaml`](https://github.com/kubernetes-sigs/descheduler/blob/022e07c27853fade6d1304adc0a6ebe02642386c/charts/descheduler/Chart.yaml) on
a `release-*` branch.
Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
Released versions of the helm charts are stored in the `gh-pages` branch of this repo.
The major and minor version of the chart matches the descheduler major and minor versions. For example, descheduler helm chart version descheduler-helm-chart-0.18.0 corresponds
to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
version of the descheduler helm chart is used to version changes specific to the helm chart.
1. Merge all helm chart changes into the master branch before the release is tagged/cut
1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix)
2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
2. Make sure your repo is clean by git's standards
3. Follow the release-branch or patch release tagging pattern from the above section.
4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
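For instance, a chart-only patch release could look roughly like this in `charts/descheduler/Chart.yaml` (version numbers are illustrative): the chart `version` is bumped while `appVersion` stays on the descheduler release.

```
apiVersion: v1
name: descheduler
version: 0.25.1     # chart version, incremented for chart-only changes
appVersion: 0.25.0  # matches the descheduler release (no v prefix)
```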
## Notes
The Helm releaser-action compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
will be nothing to compare and it will fail. This is why it's necessary to tag, eg, `v0.24.0` *before* making the changes to the
Helm chart version, so that there is a new diff for the action to find. (Tagging *after* making the Helm chart changes would
also work, but then the code that gets built into the promoted image will be tagged as `descheduler-helm-chart-xxx` rather than `v0.xx.0`).
See [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.
@@ -56,19 +102,3 @@ Pull image from the staging registry.
```
docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
```
## Helm Chart
Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
Released versions of the helm charts are stored in the `gh-pages` branch of this repo. The [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action)
is setup to build and push the helm charts to the `gh-pages` branch when changes are pushed to a `release-*` branch.
The major and minor version of the chart matches the descheduler major and minor versions. For example, descheduler helm chart version descheduler-helm-chart-0.18.0 corresponds
to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
version of the descheduler helm chart is used to version changes specific to the helm chart.
1. Merge all helm chart changes into the master branch before the release is tagged/cut
1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix)
2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
2. Make sure your repo is clean by git's standards
3. Follow the release-branch or patch release tagging pattern from the above section.
4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch

BIN
docs/release-process.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 121 KiB

View File

@@ -1,11 +1,22 @@
# User Guide
Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.
* `k8s.gcr.io/descheduler/descheduler`
Also, starting with descheduler release v0.20.0 multi-arch container images are provided. Currently AMD64 and ARM64
container images are provided. Multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from
a registry. Therefore starting with descheduler release v0.20.0 use the below process to download the official descheduler
Descheduler Version | Container Image | Architectures |
------------------- |--------------------------------------------|-------------------------|
v0.25.0 | k8s.gcr.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
v0.24.1 | k8s.gcr.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
v0.24.0 | k8s.gcr.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
v0.23.1 | k8s.gcr.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |
v0.18.0 | k8s.gcr.io/descheduler/descheduler:v0.18.0 | AMD64 |
v0.10.0 | k8s.gcr.io/descheduler/descheduler:v0.10.0 | AMD64 |
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore,
starting with descheduler release v0.20.0, use the process below to load the official descheduler
image into a kind cluster.
```
kind create cluster
@@ -27,31 +38,52 @@ Usage:
descheduler [command]
Available Commands:
completion generate the autocompletion script for the specified shell
help Help about any command
version Version of descheduler
Flags:
--add-dir-header If true, adds the file directory to the header of the log messages
--alsologtostderr log to standard error as well as files
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--dry-run execute descheduler in dry run mode.
--evict-local-storage-pods DEPRECATED: enables evicting pods using local storage by descheduler
-h, --help help for descheduler
--kubeconfig string File with kube configuration.
--log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log-dir string If non-empty, write log files in this directory
--log-file string If non-empty, use this log file
--log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--logtostderr log to standard error instead of files (default true)
--max-pods-to-evict-per-node int DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler
--node-selector string DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
--policy-config-file string File with descheduler policy configuration.
--skip-headers If true, avoid header prefixes in the log messages
--skip-log-headers If true, avoid headers when opening log files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--add-dir-header If true, adds the file directory to the header of the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--alsologtostderr log to standard error as well as files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run execute descheduler in dry run mode.
-h, --help help for descheduler
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubeconfig string File with kube configuration.
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 15s)
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled. (default 10s)
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 2s)
--log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log_dir string If non-empty, write log files in this directory (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log_file string If non-empty, use this log file (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
--logtostderr log to standard error instead of files (default true) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--one-output If true, only write logs to their native severity level (vs also writing to each lower severity level) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
--policy-config-file string File with descheduler policy configuration.
--secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
--skip-headers If true, avoid header prefixes in the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--skip-log-headers If true, avoid headers when opening log files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--stderrthreshold severity logs at or above this threshold go to stderr (default 2) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
Use "descheduler [command] --help" for more information about a command.
```
@@ -82,16 +114,17 @@ strategies:
"PodLifeTime":
enabled: true
params:
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
podLifeTime:
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
```
### Balance Cluster By Node Memory Utilization
If your cluster has been running for a long period of time, you may find that the resource utilization is not very
balanced. The `LowNodeUtilization` strategy can be used to rebalance your cluster based on `cpu`, `memory`
balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`
or `number of pods`.
Using the following policy configuration file, descheduler will rebalance the cluster based on memory by evicting pods
#### Balance high utilization nodes
Using `LowNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
```
@@ -108,11 +141,35 @@ strategies:
"memory": 70
```
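The diff hunk above is abbreviated; reconstructed from the surrounding text, a complete `LowNodeUtilization` policy of this shape sets both the under-utilization `thresholds` and the over-utilization `targetThresholds` (the 20%/70% figures mentioned above):
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "memory": 20
        targetThresholds:
          "memory": 70
```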
#### Balance low utilization nodes
Using `HighNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
from nodes with memory utilization lower than 20%. This strategy should be used together with the scheduler's `NodeResourcesFit` plugin and its `MostAllocated` scoring strategy; see the [scheduler configuration docs](https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins).
The evicted pods will be compacted into a minimal set of nodes.
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
```
### Autoheal Node Problems
Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
Nodes which have problems. Node Problem Detector can detect specific Node problems and taint any Nodes which have those
problems. The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
Nodes which have problems. Node Problem Detector can detect specific Node problems and report them to the API server.
The node controller's `TaintNodesByCondition` feature then turns certain conditions into taints. Currently this only applies to the default node conditions: PIDPressure, MemoryPressure, DiskPressure, Ready, and some cloud-provider-specific conditions.
The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate
and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.
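A minimal policy for this setup only needs the taint strategy enabled; the optional `excludedTaints` parameter (value below is purely illustrative) keeps taints that should never trigger eviction out of scope:
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
    params:
      excludedTaints:
      - dedicated=special-user # example of a taint to ignore when deciding evictions
```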
---
**NOTE**
Once [kubernetes/node-problem-detector#565](https://github.com/kubernetes/node-problem-detector/pull/565) is available in NPD, we need to update this section.
---

14
examples/failed-pods.yaml Normal file
View File

@@ -0,0 +1,14 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "OutOfcpu"
- "CreateContainerConfigError"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600 # 1 hour

View File

@@ -0,0 +1,9 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20

View File

@@ -1,4 +1,3 @@
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:

View File

@@ -1,4 +1,3 @@
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
@@ -7,3 +6,6 @@ strategies:
params:
podLifeTime:
maxPodLifeTimeSeconds: 604800 # 7 days
states:
- "Pending"
- "PodInitializing"

View File

@@ -23,3 +23,7 @@ strategies:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: true

View File

@@ -0,0 +1,8 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
nodeFit: true
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints

118
go.mod
View File

@@ -1,17 +1,115 @@
module sigs.k8s.io/descheduler
go 1.15
go 1.19
require (
github.com/client9/misspell v0.3.4
github.com/spf13/cobra v0.0.5
github.com/spf13/cobra v1.4.0
github.com/spf13/pflag v1.0.5
k8s.io/api v0.20.0
k8s.io/apimachinery v0.20.0
k8s.io/apiserver v0.20.0
k8s.io/client-go v0.20.0
k8s.io/code-generator v0.20.0
k8s.io/component-base v0.20.0
k8s.io/component-helpers v0.20.0
k8s.io/klog/v2 v2.4.0
k8s.io/api v0.25.0
k8s.io/apimachinery v0.25.0
k8s.io/apiserver v0.25.0
k8s.io/client-go v0.25.0
k8s.io/code-generator v0.25.0
k8s.io/component-base v0.25.0
k8s.io/component-helpers v0.25.0
k8s.io/klog/v2 v2.70.1
k8s.io/utils v0.0.0-20220823124924-e9cbc92d1a73
sigs.k8s.io/mdtoc v1.0.1
)
require (
cloud.google.com/go v0.97.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.1 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
go.etcd.io/etcd/api/v3 v3.5.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect
go.etcd.io/etcd/client/v3 v3.5.4 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
go.opentelemetry.io/otel v0.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.1.12 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

723
go.sum

File diff suppressed because it is too large

View File

@@ -18,15 +18,15 @@ E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
create_cluster() {
echo "#################### Creating instances ##########################"
gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
gcloud compute instances create descheduler-$master_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
# Keeping the --zone here so as to make sure that e2e's can run locally.
echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
gcloud compute instances create descheduler-$node1_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node2_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-c --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
# Delete the firewall port created for master.
echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
@@ -44,10 +44,10 @@ generate_kubeadm_instance_files() {
transfer_install_files() {
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
}
@@ -55,19 +55,19 @@ install_kube() {
# Docker installation.
gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-c
# kubeadm installation.
# 1. Transfer files to master, nodes.
transfer_install_files
# 2. Install kubeadm.
#TODO: Add rm /tmp/kubeadm_install.sh
# Open port for kube API server
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join')
# Copy the kubeconfig file onto /tmp for e2e tests.
# Copy the kubeconfig file onto /tmp for e2e tests.
gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
@@ -75,16 +75,15 @@ install_kube() {
gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
# Copy kubeadm_join to every node.
# Copy kubeadm_join to every node.
#TODO: Put these in a loop, so that extension becomes possible.
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-c
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-c
}

View File

@@ -3,4 +3,16 @@ apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "topology.kubernetes.io/zone=local-a"
- role: worker
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "topology.kubernetes.io/zone=local-b"

View File

@@ -1,3 +1,4 @@
//go:build tools
// +build tools
/*
@@ -22,4 +23,5 @@ package tools
import (
_ "github.com/client9/misspell/cmd/misspell"
_ "k8s.io/code-generator"
_ "sigs.k8s.io/mdtoc"
)

View File

@@ -5,6 +5,6 @@ go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepc
${OS_OUTPUT_BINPATH}/deepcopy-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1,${PRJ_PREFIX}/pkg/framework/plugins/defaultevictor/" \
--output-file-base zz_generated.deepcopy

View File

@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.13|go1.14|go1.15') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi

25
hack/update-toc.sh Executable file
View File

@@ -0,0 +1,25 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
${OS_OUTPUT_BINPATH}/mdtoc --inplace README.md

1
hack/verify-chart.sh Executable file
View File

@@ -0,0 +1 @@
${CONTAINER_ENGINE:-docker} run -it --rm --network host --workdir=/data --volume ~/.kube/config:/root/.kube/config:ro --volume $(pwd):/data quay.io/helmpack/chart-testing:v3.7.0 /bin/bash -c "git config --global --add safe.directory /data; ct install --config=.github/ci/ct.yaml --helm-extra-set-args=\"--set=kind=Deployment\""

36
hack/verify-conversions.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
${OS_OUTPUT_BINPATH}/conversion-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "./pkg/apis/componentconfig/v1alpha1,./pkg/api/v1alpha1" \
--output-file-base zz_generated.conversion
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated output differs:" >&2
echo "${_out}" >&2
echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh"
exit 1
fi
popd > /dev/null 2>&1
echo "Generated conversions verified."

36
hack/verify-deep-copies.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
${OS_OUTPUT_BINPATH}/deepcopy-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "./pkg/apis/componentconfig,./pkg/apis/componentconfig/v1alpha1,./pkg/api,./pkg/api/v1alpha1,./pkg/framework/plugins/defaultevictor/" \
--output-file-base zz_generated.deepcopy
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated deep-copies output differs:" >&2
echo "${_out}" >&2
echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh"
exit 1
fi
popd > /dev/null 2>&1
echo "Generated deep-copies verified."

35
hack/verify-defaulters.sh Executable file
View File

@@ -0,0 +1,35 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
${OS_OUTPUT_BINPATH}/defaulter-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.defaults
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated defaulters output differs:" >&2
echo "${_out}" >&2
echo "Generated defaulters verify failed. Please run ./hack/update-generated-defaulters.sh"
fi
popd > /dev/null 2>&1
echo "Generated Defaulters verified."

View File

@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.13|go1.14|go1.15') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi

19
hack/verify-govet.sh Executable file
View File

@@ -0,0 +1,19 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go vet ${OS_ROOT}/...

29
hack/verify-toc.sh Executable file
View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
if ! ${OS_OUTPUT_BINPATH}/mdtoc --inplace --dryrun README.md
then
echo "ERROR: Changes detected to table of contents. Run ./hack/update-toc.sh" >&2
exit 1
fi

View File

@@ -4,7 +4,7 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: descheduler-cluster-role
rules:
- apiGroups: [""]
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
@@ -12,7 +12,7 @@ rules:
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
@@ -22,6 +22,13 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["descheduler"]
verbs: ["get", "patch", "delete"]
---
apiVersion: v1
kind: ServiceAccount
@@ -33,7 +40,6 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: descheduler-cluster-role-binding
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -42,4 +48,3 @@ subjects:
- name: descheduler-sa
kind: ServiceAccount
namespace: kube-system
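The new `coordination.k8s.io` lease rules above back the leader-election support that appears later in this diff (`LeaderElection` in the component configuration). A sketch of how a multi-replica Deployment might opt in, assuming the standard component-base flag is exposed as `--leader-elect`:
```
# illustrative fragment of the descheduler container spec
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--leader-elect=true" # assumed flag name, wired from componentbaseconfig.LeaderElectionConfiguration
```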

View File

@@ -1,5 +1,5 @@
---
apiVersion: batch/v1beta1
apiVersion: batch/v1
kind: CronJob
metadata:
name: descheduler-cronjob
@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.20.0
image: k8s.gcr.io/descheduler/descheduler:v0.25.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
@@ -31,6 +31,22 @@ spec:
requests:
cpu: "500m"
memory: "256Mi"
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:

View File

@@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: descheduler
namespace: kube-system
labels:
app: descheduler
spec:
replicas: 1
selector:
matchLabels:
app: descheduler
template:
metadata:
labels:
app: descheduler
spec:
priorityClassName: system-cluster-critical
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.25.0
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- "5m"
- "--v"
- "3"
ports:
- containerPort: 10258
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
resources:
requests:
cpu: 500m
memory: 256Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../base
- deployment.yaml

View File

@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.20.0
image: k8s.gcr.io/descheduler/descheduler:v0.25.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
@@ -29,6 +29,22 @@ spec:
requests:
cpu: "500m"
memory: "256Mi"
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:

72
metrics/metrics.go Normal file
View File

@@ -0,0 +1,72 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"sigs.k8s.io/descheduler/pkg/version"
)
const (
// DeschedulerSubsystem - subsystem name used by descheduler
DeschedulerSubsystem = "descheduler"
)
var (
PodsEvicted = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted",
Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
}, []string{"result", "strategy", "namespace", "node"})
buildInfo = metrics.NewGauge(
&metrics.GaugeOpts{
Subsystem: DeschedulerSubsystem,
Name: "build_info",
Help: "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch",
ConstLabels: map[string]string{"GoVersion": version.Get().GoVersion, "AppVersion": version.Get().Major + "." + version.Get().Minor, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
StabilityLevel: metrics.ALPHA,
},
)
metricsList = []metrics.Registerable{
PodsEvicted,
buildInfo,
}
)
var registerMetrics sync.Once
// Register all metrics.
func Register() {
// Register the metrics.
registerMetrics.Do(func() {
RegisterMetrics(metricsList...)
})
}
// RegisterMetrics registers a list of metrics.
func RegisterMetrics(extraMetrics ...metrics.Registerable) {
for _, metric := range extraMetrics {
legacyregistry.MustRegister(metric)
}
}
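With the `descheduler` subsystem prefix, a scrape of the metrics endpoint exposes these roughly as follows (sample output; label values are illustrative):
```
descheduler_build_info{AppVersion="0.25",DeschedulerVersion="v0.25.0",GitBranch="release-1.25",GitSha1="abc1234",GoVersion="go1.19"} 1
descheduler_pods_evicted{namespace="default",node="node-1",result="success",strategy="PodLifeTime"} 3
```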

View File

@@ -32,11 +32,23 @@ type DeschedulerPolicy struct {
// NodeSelector for a set of nodes to operate over
NodeSelector *string
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
EvictSystemCriticalPods *bool
// IgnorePVCPods prevents pods with PVCs from being evicted.
IgnorePVCPods *bool
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int
MaxNoOfPodsToEvictPerNode *uint
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint
}
type StrategyName string
@@ -62,25 +74,33 @@ type Namespaces struct {
// Besides Namespaces only one of its members may be specified
// TODO(jchaloup): move Namespaces ThresholdPriority and ThresholdPriorityClassName to individual strategies
// once the policy version is bumped to v1alpha2
//
// once the policy version is bumped to v1alpha2
type StrategyParameters struct {
NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
NodeAffinityType []string
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts
PodLifeTime *PodLifeTime
RemoveDuplicates *RemoveDuplicates
FailedPods *FailedPods
IncludeSoftConstraints bool
Namespaces *Namespaces
ThresholdPriority *int32
ThresholdPriorityClassName string
LabelSelector *metav1.LabelSelector
NodeFit bool
IncludePreferNoSchedule bool
ExcludedTaints []string
}
type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage
type NodeResourceUtilizationThresholds struct {
Thresholds ResourceThresholds
TargetThresholds ResourceThresholds
NumberOfNodes int
UseDeviationThresholds bool
Thresholds ResourceThresholds
TargetThresholds ResourceThresholds
NumberOfNodes int
}
type PodsHavingTooManyRestarts struct {
@@ -94,5 +114,20 @@ type RemoveDuplicates struct {
type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint
PodStatusPhases []string
States []string
// Deprecated: Use States instead.
PodStatusPhases []string
}
type FailedPods struct {
ExcludeOwnerKinds []string
MinPodLifetimeSeconds *uint
Reasons []string
IncludingInitContainers bool
}
type PriorityThreshold struct {
Value *int32
Name string
}

View File

@@ -32,11 +32,23 @@ type DeschedulerPolicy struct {
// NodeSelector for a set of nodes to operate over
NodeSelector *string `json:"nodeSelector,omitempty"`
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`
// IgnorePVCPods prevents pods with PVCs from being evicted.
IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}
type StrategyName string
@@ -67,18 +79,25 @@ type StrategyParameters struct {
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
FailedPods *FailedPods `json:"failedPods,omitempty"`
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
Namespaces *Namespaces `json:"namespaces"`
ThresholdPriority *int32 `json:"thresholdPriority"`
ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
NodeFit bool `json:"nodeFit"`
IncludePreferNoSchedule bool `json:"includePreferNoSchedule"`
ExcludedTaints []string `json:"excludedTaints,omitempty"`
}
type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage
type NodeResourceUtilizationThresholds struct {
Thresholds ResourceThresholds `json:"thresholds,omitempty"`
TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
NumberOfNodes int `json:"numberOfNodes,omitempty"`
UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
Thresholds ResourceThresholds `json:"thresholds,omitempty"`
TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
NumberOfNodes int `json:"numberOfNodes,omitempty"`
}
type PodsHavingTooManyRestarts struct {
@@ -92,5 +111,15 @@ type RemoveDuplicates struct {
type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
States []string `json:"states,omitempty"`
// Deprecated: Use States instead.
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
}
type FailedPods struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
Reasons []string `json:"reasons,omitempty"`
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}
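Taken together, the json tags above define the top-level keys of a v1alpha1 policy file. A minimal sketch that only uses fields visible in this diff (all values are illustrative):
```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: "kubernetes.io/os=linux"
evictFailedBarePods: true
evictLocalStoragePods: true
evictSystemCriticalPods: false
ignorePvcPods: true
maxNoOfPodsToEvictPerNode: 10
maxNoOfPodsToEvictPerNamespace: 10
strategies:
  "RemoveFailedPods":
    enabled: true
```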

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,6 +24,7 @@ package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
@@ -55,6 +57,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*FailedPods)(nil), (*api.FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FailedPods_To_api_FailedPods(a.(*FailedPods), b.(*api.FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.FailedPods)(nil), (*FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_FailedPods_To_v1alpha1_FailedPods(a.(*api.FailedPods), b.(*FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope)
}); err != nil {
@@ -121,8 +133,24 @@ func RegisterConversions(s *runtime.Scheme) error {
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil
}
@@ -134,8 +162,24 @@ func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Descheduler
func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil
}
@@ -168,6 +212,32 @@ func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.Des
return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
}
func autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_v1alpha1_FailedPods_To_api_FailedPods is an autogenerated conversion function.
func Convert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
return autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in, out, s)
}
func autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_api_FailedPods_To_v1alpha1_FailedPods is an autogenerated conversion function.
func Convert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
return autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in, out, s)
}
func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
@@ -191,6 +261,7 @@ func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Name
}
func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
out.UseDeviationThresholds = in.UseDeviationThresholds
out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
out.NumberOfNodes = in.NumberOfNodes
@@ -203,6 +274,7 @@ func Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtili
}
func autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
out.UseDeviationThresholds = in.UseDeviationThresholds
out.Thresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
out.TargetThresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
out.NumberOfNodes = in.NumberOfNodes
@@ -216,6 +288,7 @@ func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtili
func autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
out.States = *(*[]string)(unsafe.Pointer(&in.States))
out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
return nil
}
@@ -227,6 +300,7 @@ func Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.P
func autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
out.States = *(*[]string)(unsafe.Pointer(&in.States))
out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
return nil
}
@@ -284,9 +358,15 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*api.FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.NodeFit = in.NodeFit
out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
return nil
}
@@ -301,9 +381,15 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.NodeFit = in.NodeFit
out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
return nil
}

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,6 +22,7 @@ limitations under the License.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -40,16 +42,36 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(string)
**out = **in
}
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool)
**out = **in
}
if in.EvictSystemCriticalPods != nil {
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
*out = new(bool)
**out = **in
}
if in.IgnorePVCPods != nil {
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
*out = new(bool)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = **in
}
return
}
@@ -92,6 +114,37 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
@@ -156,6 +209,11 @@ func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
*out = new(uint)
**out = **in
}
if in.States != nil {
in, out := &in.States, &out.States
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodStatusPhases != nil {
in, out := &in.PodStatusPhases, &out.PodStatusPhases
*out = make([]string, len(*in))
@@ -283,6 +341,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)
@@ -293,6 +356,16 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = new(int32)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludedTaints != nil {
in, out := &in.ExcludedTaints, &out.ExcludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,6 +22,7 @@ limitations under the License.
package api
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -40,14 +42,34 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(string)
**out = **in
}
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool)
**out = **in
}
if in.EvictSystemCriticalPods != nil {
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
*out = new(bool)
**out = **in
}
if in.IgnorePVCPods != nil {
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
*out = new(bool)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = **in
}
return
@@ -92,6 +114,37 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
@@ -156,6 +209,11 @@ func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
*out = new(uint)
**out = **in
}
if in.States != nil {
in, out := &in.States, &out.States
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodStatusPhases != nil {
in, out := &in.PodStatusPhases, &out.PodStatusPhases
*out = make([]string, len(*in))
@@ -190,6 +248,27 @@ func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityThreshold) DeepCopyInto(out *PriorityThreshold) {
*out = *in
if in.Value != nil {
in, out := &in.Value, &out.Value
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityThreshold.
func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
if in == nil {
return nil
}
out := new(PriorityThreshold)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
*out = *in
@@ -283,6 +362,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)
@@ -293,6 +377,16 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = new(int32)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludedTaints != nil {
in, out := &in.ExcludedTaints, &out.ExcludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -50,7 +51,13 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
Logging componentbaseconfig.LoggingConfiguration
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration
}

View File

@@ -0,0 +1,133 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package componentconfig
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingNodeTaintsArgs holds arguments used to configure the RemovePodsViolatingNodeTaints plugin.
type RemovePodsViolatingNodeTaintsArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
IncludePreferNoSchedule bool
ExcludedTaints []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemoveFailedPodsArgs holds arguments used to configure RemoveFailedPods plugin.
type RemoveFailedPodsArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
ExcludeOwnerKinds []string
MinPodLifetimeSeconds *uint
Reasons []string
IncludingInitContainers bool
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingNodeAffinityArgs holds arguments used to configure RemovePodsViolatingNodeAffinity plugin.
type RemovePodsViolatingNodeAffinityArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
NodeAffinityType []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsHavingTooManyRestartsArgs holds arguments used to configure RemovePodsHavingTooManyRestarts plugin.
type RemovePodsHavingTooManyRestartsArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
PodRestartThreshold int32
IncludingInitContainers bool
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type RemoveDuplicatesArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
ExcludeOwnerKinds []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodLifeTimeArgs holds arguments used to configure PodLifeTime plugin.
type PodLifeTimeArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
MaxPodLifeTimeSeconds *uint
States []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingTopologySpreadConstraintArgs holds arguments used to configure RemovePodsViolatingTopologySpreadConstraint plugin.
type RemovePodsViolatingTopologySpreadConstraintArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
IncludeSoftConstraints bool
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RemovePodsViolatingInterPodAntiAffinity holds arguments used to configure RemovePodsViolatingInterPodAntiAffinity plugin.
type RemovePodsViolatingInterPodAntiAffinityArgs struct {
metav1.TypeMeta
Namespaces *api.Namespaces
LabelSelector *metav1.LabelSelector
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type LowNodeUtilizationArgs struct {
metav1.TypeMeta
UseDeviationThresholds bool
Thresholds api.ResourceThresholds
TargetThresholds api.ResourceThresholds
NumberOfNodes int
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type HighNodeUtilizationArgs struct {
metav1.TypeMeta
Thresholds api.ResourceThresholds
NumberOfNodes int
}


@@ -20,6 +20,8 @@ import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -48,4 +50,14 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration `json:"logging,omitempty"`
}


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -56,6 +57,9 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
out.LeaderElection = in.LeaderElection
out.Logging = in.Logging
return nil
}
@@ -72,6 +76,9 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
out.LeaderElection = in.LeaderElection
out.Logging = in.Logging
return nil
}


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -28,6 +29,8 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.LeaderElection = in.LeaderElection
in.Logging.DeepCopyInto(&out.Logging)
return
}


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -0,0 +1,203 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
v1 "k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)
const (
// MinResourcePercentage is the minimum value of a resource's percentage
MinResourcePercentage = 0
// MaxResourcePercentage is the maximum value of a resource's percentage
MaxResourcePercentage = 100
)
// ValidateRemovePodsViolatingNodeTaintsArgs validates RemovePodsViolatingNodeTaints arguments
func ValidateRemovePodsViolatingNodeTaintsArgs(args *componentconfig.RemovePodsViolatingNodeTaintsArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// ValidateRemoveFailedPodsArgs validates RemoveFailedPods arguments
func ValidateRemoveFailedPodsArgs(args *componentconfig.RemoveFailedPodsArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// ValidateRemovePodsHavingTooManyRestartsArgs validates RemovePodsHavingTooManyRestarts arguments
func ValidateRemovePodsHavingTooManyRestartsArgs(args *componentconfig.RemovePodsHavingTooManyRestartsArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
validatePodRestartThreshold(args.PodRestartThreshold),
)
}
// ValidateRemovePodsViolatingNodeAffinityArgs validates RemovePodsViolatingNodeAffinity arguments
func ValidateRemovePodsViolatingNodeAffinityArgs(args *componentconfig.RemovePodsViolatingNodeAffinityArgs) error {
var err error
if args == nil || len(args.NodeAffinityType) == 0 {
err = fmt.Errorf("nodeAffinityType needs to be set")
}
return errorsAggregate(
err,
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// ValidatePodLifeTimeArgs validates PodLifeTime arguments
func ValidatePodLifeTimeArgs(args *componentconfig.PodLifeTimeArgs) error {
var err error
if args.MaxPodLifeTimeSeconds == nil {
err = fmt.Errorf("MaxPodLifeTimeSeconds not set")
}
return errorsAggregate(
err,
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
validatePodLifeTimeStates(args.States),
)
}
func ValidateRemoveDuplicatesArgs(args *componentconfig.RemoveDuplicatesArgs) error {
return validateNamespaceArgs(args.Namespaces)
}
// ValidateRemovePodsViolatingTopologySpreadConstraintArgs validates RemovePodsViolatingTopologySpreadConstraint arguments
func ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args *componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// ValidateRemovePodsViolatingInterPodAntiAffinityArgs validates ValidateRemovePodsViolatingInterPodAntiAffinity arguments
func ValidateRemovePodsViolatingInterPodAntiAffinityArgs(args *componentconfig.RemovePodsViolatingInterPodAntiAffinityArgs) error {
return errorsAggregate(
validateNamespaceArgs(args.Namespaces),
validateLabelSelectorArgs(args.LabelSelector),
)
}
// errorsAggregate converts all arg validation errors to a single error interface.
// if no errors, it will return nil.
func errorsAggregate(errors ...error) error {
return utilerrors.NewAggregate(errors)
}
func validateNamespaceArgs(namespaces *api.Namespaces) error {
// At most one of include/exclude can be set
if namespaces != nil && len(namespaces.Include) > 0 && len(namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
return nil
}
func validateLabelSelectorArgs(labelSelector *metav1.LabelSelector) error {
if labelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(labelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
}
}
return nil
}
func validatePodRestartThreshold(podRestartThreshold int32) error {
if podRestartThreshold < 1 {
return fmt.Errorf("PodsHavingTooManyRestarts threshold not set")
}
return nil
}
func validatePodLifeTimeStates(states []string) error {
podLifeTimeAllowedStates := sets.NewString(
string(v1.PodRunning),
string(v1.PodPending),
// Container state reasons: https://github.com/kubernetes/kubernetes/blob/release-1.24/pkg/kubelet/kubelet_pods.go#L76-L79
"PodInitializing",
"ContainerCreating",
)
if !podLifeTimeAllowedStates.HasAll(states...) {
return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.List())
}
return nil
}
func ValidateHighNodeUtilizationArgs(args *componentconfig.HighNodeUtilizationArgs) error {
return validateThresholds(args.Thresholds)
}
func ValidateLowNodeUtilizationArgs(args *componentconfig.LowNodeUtilizationArgs) error {
return validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, args.UseDeviationThresholds)
}
func validateLowNodeUtilizationThresholds(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) error {
// validate thresholds and targetThresholds config
if err := validateThresholds(thresholds); err != nil {
return fmt.Errorf("thresholds config is not valid: %v", err)
}
if err := validateThresholds(targetThresholds); err != nil {
return fmt.Errorf("targetThresholds config is not valid: %v", err)
}
// validate if thresholds and targetThresholds have same resources configured
if len(thresholds) != len(targetThresholds) {
return fmt.Errorf("thresholds and targetThresholds configured different resources")
}
for resourceName, value := range thresholds {
if targetValue, ok := targetThresholds[resourceName]; !ok {
return fmt.Errorf("thresholds and targetThresholds configured different resources")
} else if value > targetValue && !useDeviationThresholds {
return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
}
}
return nil
}
// validateThresholds checks if thresholds have valid resource name and resource percentage configured
func validateThresholds(thresholds api.ResourceThresholds) error {
if len(thresholds) == 0 {
return fmt.Errorf("no resource threshold is configured")
}
for name, percent := range thresholds {
if percent < MinResourcePercentage || percent > MaxResourcePercentage {
return fmt.Errorf("%v threshold not in [%v, %v] range", name, MinResourcePercentage, MaxResourcePercentage)
}
}
return nil
}
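As a quick illustration of how these exported validators are meant to be consumed, here is a hedged caller sketch (not part of the diff); the validation package import path and the surrounding main() are assumptions, while the validator name and the threshold range check come from the file above.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation" // assumed package path
)

func main() {
	// 120% memory lies outside the [MinResourcePercentage, MaxResourcePercentage] range,
	// so ValidateHighNodeUtilizationArgs is expected to return an error here.
	args := &componentconfig.HighNodeUtilizationArgs{
		Thresholds: api.ResourceThresholds{
			v1.ResourceCPU:    20,
			v1.ResourceMemory: 120,
		},
	}
	if err := validation.ValidateHighNodeUtilizationArgs(args); err != nil {
		fmt.Println("invalid plugin config:", err)
	}
}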


@@ -0,0 +1,331 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)
func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
testCases := []struct {
description string
args *componentconfig.RemovePodsViolatingNodeTaintsArgs
expectError bool
}{
{
description: "valid namespace args, no errors",
args: &componentconfig.RemovePodsViolatingNodeTaintsArgs{
Namespaces: &api.Namespaces{
Include: []string{"default"},
},
},
expectError: false,
},
{
description: "invalid namespaces args, expects error",
args: &componentconfig.RemovePodsViolatingNodeTaintsArgs{
Namespaces: &api.Namespaces{
Include: []string{"default"},
Exclude: []string{"kube-system"},
},
},
expectError: true,
},
{
description: "valid label selector args, no errors",
args: &componentconfig.RemovePodsViolatingNodeTaintsArgs{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
},
},
expectError: false,
},
{
description: "invalid label selector args, expects errors",
args: &componentconfig.RemovePodsViolatingNodeTaintsArgs{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Operator: metav1.LabelSelectorOpIn,
},
},
},
},
expectError: true,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemovePodsViolatingNodeTaintsArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
}
})
}
}
func TestValidateRemovePodsViolatingNodeAffinityArgs(t *testing.T) {
testCases := []struct {
description string
args *componentconfig.RemovePodsViolatingNodeAffinityArgs
expectError bool
}{
{
description: "nil NodeAffinityType args, expects errors",
args: &componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: nil,
},
expectError: true,
},
{
description: "empty NodeAffinityType args, expects errors",
args: &componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{},
},
expectError: true,
},
{
description: "valid NodeAffinityType args, no errors",
args: &componentconfig.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
expectError: false,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemovePodsViolatingNodeAffinityArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
}
})
}
}
func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
testCases := []struct {
description string
args *componentconfig.PodLifeTimeArgs
expectError bool
}{
{
description: "valid arg, no errors",
args: &componentconfig.PodLifeTimeArgs{
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
States: []string{string(v1.PodRunning)},
},
expectError: false,
},
{
description: "nil MaxPodLifeTimeSeconds arg, expects errors",
args: &componentconfig.PodLifeTimeArgs{
MaxPodLifeTimeSeconds: nil,
},
expectError: true,
},
{
description: "invalid pod state arg, expects errors",
args: &componentconfig.PodLifeTimeArgs{
States: []string{string(v1.NodeRunning)},
},
expectError: true,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidatePodLifeTimeArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
}
})
}
}
func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
var extendedResource = v1.ResourceName("example.com/foo")
tests := []struct {
name string
thresholds api.ResourceThresholds
targetThresholds api.ResourceThresholds
errInfo error
}{
{
name: "passing invalid thresholds",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 120,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
},
{
name: "thresholds and targetThresholds configured different num of resources",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
v1.ResourcePods: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds and targetThresholds configured different resources",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourcePods: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds' CPU config value is greater than targetThresholds'",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 90,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
},
{
name: "only thresholds configured extended resource",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "only targetThresholds configured extended resource",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
extendedResource: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds and targetThresholds configured different extended resources",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
"example.com/bar": 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds' extended resource config value is greater than targetThresholds'",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 90,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
extendedResource: 20,
},
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
},
{
name: "passing valid plugin config",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: nil,
},
{
name: "passing valid plugin config with extended resource",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
extendedResource: 80,
},
errInfo: nil,
},
}
for _, testCase := range tests {
args := &componentconfig.LowNodeUtilizationArgs{
Thresholds: testCase.thresholds,
TargetThresholds: testCase.targetThresholds,
}
validateErr := validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, false)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
}
}
}


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,13 +22,17 @@ limitations under the License.
package componentconfig
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.LeaderElection = in.LeaderElection
in.Logging.DeepCopyInto(&out.Logging)
return
}
@@ -48,3 +53,389 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HighNodeUtilizationArgs) DeepCopyInto(out *HighNodeUtilizationArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Thresholds != nil {
in, out := &in.Thresholds, &out.Thresholds
*out = make(api.ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HighNodeUtilizationArgs.
func (in *HighNodeUtilizationArgs) DeepCopy() *HighNodeUtilizationArgs {
if in == nil {
return nil
}
out := new(HighNodeUtilizationArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HighNodeUtilizationArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Thresholds != nil {
in, out := &in.Thresholds, &out.Thresholds
*out = make(api.ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.TargetThresholds != nil {
in, out := &in.TargetThresholds, &out.TargetThresholds
*out = make(api.ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LowNodeUtilizationArgs.
func (in *LowNodeUtilizationArgs) DeepCopy() *LowNodeUtilizationArgs {
if in == nil {
return nil
}
out := new(LowNodeUtilizationArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LowNodeUtilizationArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTimeArgs) DeepCopyInto(out *PodLifeTimeArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.MaxPodLifeTimeSeconds != nil {
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
*out = new(uint)
**out = **in
}
if in.States != nil {
in, out := &in.States, &out.States
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTimeArgs.
func (in *PodLifeTimeArgs) DeepCopy() *PodLifeTimeArgs {
if in == nil {
return nil
}
out := new(PodLifeTimeArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodLifeTimeArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicatesArgs) DeepCopyInto(out *RemoveDuplicatesArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicatesArgs.
func (in *RemoveDuplicatesArgs) DeepCopy() *RemoveDuplicatesArgs {
if in == nil {
return nil
}
out := new(RemoveDuplicatesArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemoveDuplicatesArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveFailedPodsArgs) DeepCopyInto(out *RemoveFailedPodsArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveFailedPodsArgs.
func (in *RemoveFailedPodsArgs) DeepCopy() *RemoveFailedPodsArgs {
if in == nil {
return nil
}
out := new(RemoveFailedPodsArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemoveFailedPodsArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopyInto(out *RemovePodsHavingTooManyRestartsArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsHavingTooManyRestartsArgs.
func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopy() *RemovePodsHavingTooManyRestartsArgs {
if in == nil {
return nil
}
out := new(RemovePodsHavingTooManyRestartsArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingInterPodAntiAffinityArgs) DeepCopyInto(out *RemovePodsViolatingInterPodAntiAffinityArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingInterPodAntiAffinityArgs.
func (in *RemovePodsViolatingInterPodAntiAffinityArgs) DeepCopy() *RemovePodsViolatingInterPodAntiAffinityArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingInterPodAntiAffinityArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingInterPodAntiAffinityArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopyInto(out *RemovePodsViolatingNodeAffinityArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingNodeAffinityArgs.
func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopy() *RemovePodsViolatingNodeAffinityArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingNodeAffinityArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingNodeAffinityArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingNodeTaintsArgs) DeepCopyInto(out *RemovePodsViolatingNodeTaintsArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludedTaints != nil {
in, out := &in.ExcludedTaints, &out.ExcludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingNodeTaintsArgs.
func (in *RemovePodsViolatingNodeTaintsArgs) DeepCopy() *RemovePodsViolatingNodeTaintsArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingNodeTaintsArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingNodeTaintsArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyInto(out *RemovePodsViolatingTopologySpreadConstraintArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemovePodsViolatingTopologySpreadConstraintArgs.
func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopy() *RemovePodsViolatingTopologySpreadConstraintArgs {
if in == nil {
return nil
}
out := new(RemovePodsViolatingTopologySpreadConstraintArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}


@@ -26,7 +26,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
func CreateClient(kubeconfig string) (clientset.Interface, error) {
func CreateClient(kubeconfig string, userAgt string) (clientset.Interface, error) {
var cfg *rest.Config
if len(kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(kubeconfig)
@@ -47,7 +47,11 @@ func CreateClient(kubeconfig string) (clientset.Interface, error) {
}
}
return clientset.NewForConfig(cfg)
if len(userAgt) != 0 {
return clientset.NewForConfig(rest.AddUserAgent(cfg, userAgt))
} else {
return clientset.NewForConfig(cfg)
}
}
func GetMasterFromKubeconfig(filename string) (string, error) {
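To illustrate the new userAgt parameter, here is a hedged usage sketch (not part of the diff) that mirrors the createClients helper appearing later in this comparison; the kubeconfig path and the surrounding main() are hypothetical.

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/descheduler/client"
)

func main() {
	kubeconfig := "/path/to/kubeconfig" // hypothetical path

	// Requests from this client include "descheduler" in their User-Agent header.
	kClient, err := client.CreateClient(kubeconfig, "descheduler")
	if err != nil {
		fmt.Println("unable to create client:", err)
		return
	}

	// An empty user agent leaves the default client-go User-Agent untouched.
	eventClient, err := client.CreateClient(kubeconfig, "")
	if err != nil {
		fmt.Println("unable to create event client:", err)
		return
	}

	_, _ = kClient, eventClient
}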


@@ -20,28 +20,43 @@ import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
schedulingv1informers "k8s.io/client-go/informers/scheduling/v1"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/utils"
)
func Run(rs *options.DeschedulerServer) error {
ctx := context.Background()
rsclient, err := client.CreateClient(rs.KubeconfigFile)
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
metrics.Register()
rsclient, eventClient, err := createClients(rs.KubeconfigFile)
if err != nil {
return err
}
rs.Client = rsclient
rs.EventClient = eventClient
deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile)
if err != nil {
@@ -56,79 +71,379 @@ func Run(rs *options.DeschedulerServer) error {
return err
}
stopChannel := make(chan struct{})
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
}
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor)
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)
strategyFuncs := map[string]strategyFunction{
"RemoveDuplicates": strategies.RemoveDuplicatePods,
"LowNodeUtilization": strategies.LowNodeUtilization,
"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
"RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity,
"RemovePodsViolatingNodeTaints": strategies.RemovePodsViolatingNodeTaints,
"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
"PodLifeTime": strategies.PodLifeTime,
"RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint,
runFn := func() error {
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
}
nodeSelector := rs.NodeSelector
if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
return fmt.Errorf("leaderElection must be used with deschedulingInterval")
}
if rs.LeaderElection.LeaderElect && !rs.DryRun {
if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
return fmt.Errorf("leaderElection: %w", err)
}
return nil
}
return runFn()
}
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter framework.EvictorPlugin, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
func cachedClient(
realClient clientset.Interface,
podInformer corev1informers.PodInformer,
nodeInformer corev1informers.NodeInformer,
namespaceInformer corev1informers.NamespaceInformer,
priorityClassInformer schedulingv1informers.PriorityClassInformer,
) (clientset.Interface, error) {
fakeClient := fakeclientset.NewSimpleClientset()
// simulate a pod eviction by deleting a pod
fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
eviction, matched := createAct.Object.(*policy.Eviction)
if !matched {
return false, nil, fmt.Errorf("unable to convert action object into *policy.Eviction")
}
if err := fakeClient.Tracker().Delete(action.GetResource(), eviction.GetNamespace(), eviction.GetName()); err != nil {
return false, nil, fmt.Errorf("unable to delete pod %v/%v: %v", eviction.GetNamespace(), eviction.GetName(), err)
}
return true, nil, nil
}
// fallback to the default reactor
return false, nil, nil
})
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
pods, err := podInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list pods: %v", err)
}
for _, item := range pods {
if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy pod: %v", err)
}
}
nodes, err := nodeInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list nodes: %v", err)
}
for _, item := range nodes {
if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
namespaces, err := namespaceInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list namespaces: %v", err)
}
for _, item := range namespaces {
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
priorityClasses, err := priorityClassInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
}
for _, item := range priorityClasses {
if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
}
}
return fakeClient, nil
}
// evictorImpl implements the Evictor interface so plugins
// can evict a pod without importing a specific pod evictor
type evictorImpl struct {
podEvictor *evictions.PodEvictor
evictorFilter framework.EvictorPlugin
}
var _ framework.Evictor = &evictorImpl{}
// Filter checks if a pod can be evicted
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
return ei.evictorFilter.Filter(pod)
}
// Evict evicts a pod (no pre-check performed)
func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
return ei.podEvictor.EvictPod(ctx, pod, opts)
}
func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
return ei.podEvictor.NodeLimitExceeded(node)
}
// handleImpl implements the framework handle which gets passed to plugins
type handleImpl struct {
clientSet clientset.Interface
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
evictor *evictorImpl
}
var _ framework.Handle = &handleImpl{}
// ClientSet retrieves kube client set
func (hi *handleImpl) ClientSet() clientset.Interface {
return hi.clientSet
}
// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
return hi.getPodsAssignedToNodeFunc
}
// SharedInformerFactory retrieves shared informer factory
func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
return hi.sharedInformerFactory
}
// Evictor retrieves evictor so plugins can filter and evict pods
func (hi *handleImpl) Evictor() framework.Evictor {
return hi.evictor
}
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
podInformer := sharedInformerFactory.Core().V1().Pods()
namespaceInformer := sharedInformerFactory.Core().V1().Namespaces()
priorityClassInformer := sharedInformerFactory.Scheduling().V1().PriorityClasses()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// create the informers
namespaceInformer.Informer()
priorityClassInformer.Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
strategyFuncs := map[api.StrategyName]strategyFunction{
"RemoveDuplicates": nil,
"LowNodeUtilization": nil,
"HighNodeUtilization": nil,
"RemovePodsViolatingInterPodAntiAffinity": nil,
"RemovePodsViolatingNodeAffinity": nil,
"RemovePodsViolatingNodeTaints": nil,
"RemovePodsHavingTooManyRestarts": nil,
"PodLifeTime": nil,
"RemovePodsViolatingTopologySpreadConstraint": nil,
"RemoveFailedPods": nil,
}
var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
nodeSelector = *deschedulerPolicy.NodeSelector
}
evictLocalStoragePods := rs.EvictLocalStoragePods
var evictLocalStoragePods bool
if deschedulerPolicy.EvictLocalStoragePods != nil {
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
}
maxNoOfPodsToEvictPerNode := rs.MaxNoOfPodsToEvictPerNode
if deschedulerPolicy.MaxNoOfPodsToEvictPerNode != nil {
maxNoOfPodsToEvictPerNode = *deschedulerPolicy.MaxNoOfPodsToEvictPerNode
evictBarePods := false
if deschedulerPolicy.EvictFailedBarePods != nil {
evictBarePods = *deschedulerPolicy.EvictFailedBarePods
if evictBarePods {
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
}
}
wait.Until(func() {
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector, stopChannel)
evictSystemCriticalPods := false
if deschedulerPolicy.EvictSystemCriticalPods != nil {
evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
if evictSystemCriticalPods {
klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
}
}
ignorePvcPods := false
if deschedulerPolicy.IgnorePVCPods != nil {
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
}
var eventClient clientset.Interface
if rs.DryRun {
eventClient = fakeclientset.NewSimpleClientset()
} else {
eventClient = rs.Client
}
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()
wait.NonSlidingUntil(func() {
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
if err != nil {
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
close(stopChannel)
cancel()
return
}
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
close(stopChannel)
cancel()
return
}
var podEvictorClient clientset.Interface
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client,
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
if rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(rs.Client, podInformer, nodeInformer, namespaceInformer, priorityClassInformer)
if err != nil {
klog.Error(err)
return
}
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods())
if err != nil {
klog.Errorf("build get pods assigned to node function error: %v", err)
return
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
podEvictorClient = fakeClient
} else {
podEvictorClient = rs.Client
}
klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
rs.Client,
podEvictorClient,
evictionPolicyGroupVersion,
rs.DryRun,
maxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
evictLocalStoragePods,
!rs.DisableMetrics,
eventRecorder,
)
for name, f := range strategyFuncs {
if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
f(ctx, rs.Client, strategy, nodes, podEvictor)
for name, strategy := range deschedulerPolicy.Strategies {
if f, ok := strategyFuncs[name]; ok {
if strategy.Enabled {
params := strategy.Params
if params == nil {
params = &api.StrategyParameters{}
}
nodeFit := false
if name != "PodLifeTime" {
nodeFit = params.NodeFit
}
// TODO(jchaloup): once all strategies are migrated move this check under
// the default evictor args validation
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
klog.V(1).ErrorS(fmt.Errorf("priority threshold misconfigured"), "only one of priorityThreshold fields can be set", "pluginName", name)
continue
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, rs.Client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
continue
}
defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
EvictLocalStoragePods: evictLocalStoragePods,
EvictSystemCriticalPods: evictSystemCriticalPods,
IgnorePvcPods: ignorePvcPods,
EvictFailedBarePods: evictBarePods,
NodeFit: nodeFit,
PriorityThreshold: &api.PriorityThreshold{
Value: &thresholdPriority,
},
}
evictorFilter, _ := defaultevictor.New(
defaultevictorArgs,
&handleImpl{
clientSet: rs.Client,
getPodsAssignedToNodeFunc: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
},
)
handle := &handleImpl{
clientSet: rs.Client,
getPodsAssignedToNodeFunc: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
evictor: &evictorImpl{
podEvictor: podEvictor,
evictorFilter: evictorFilter.(framework.EvictorPlugin),
},
}
// TODO: strategyName should be accessible from within the strategy using a framework
// handle or function which the Evictor has access to. For migration/in-progress framework
// work, we are currently passing this via context. To be removed
// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
childCtx := context.WithValue(ctx, "strategyName", string(name))
if pgFnc, exists := pluginsMap[string(name)]; exists {
pgFnc(childCtx, nodes, params, handle)
} else {
f(childCtx, rs.Client, strategy, nodes, podEvictor, evictorFilter.(framework.EvictorPlugin), getPodsAssignedToNode)
}
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
}
}
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
// If no descheduling interval was specified, cancel the context to end the wait loop after one iteration
if rs.DeschedulingInterval.Seconds() == 0 {
close(stopChannel)
cancel()
}
}, rs.DeschedulingInterval, stopChannel)
}, rs.DeschedulingInterval, ctx.Done())
return nil
}
func createClients(kubeconfig string) (clientset.Interface, clientset.Interface, error) {
kClient, err := client.CreateClient(kubeconfig, "descheduler")
if err != nil {
return nil, nil, err
}
eventClient, err := client.CreateClient(kubeconfig, "")
if err != nil {
return nil, nil, err
}
return kClient, eventClient, nil
}
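The evictorImpl/handleImpl wiring above is what plugins see at run time. As a rough, non-authoritative sketch of how a strategy body might drive it, consider the following; the Filter/Evict/NodeLimitExceeded calls mirror the evictorImpl methods shown in this diff, while the exact GetPodsAssignedToNodeFunc signature, the trivial filter, and the package layout are assumptions.

package example // hypothetical package, not part of the diff

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/framework"
)

// evictFilteredPodsOnNode sketches the per-node loop a plugin typically runs:
// stop when the node's eviction budget is exhausted, skip pods the evictor
// filter rejects, and evict the rest.
func evictFilteredPodsOnNode(ctx context.Context, h framework.Handle, node *v1.Node) {
	// Assumed signature: func(nodeName string, filter func(*v1.Pod) bool) ([]*v1.Pod, error)
	pods, err := h.GetPodsAssignedToNodeFunc()(node.Name, func(*v1.Pod) bool { return true })
	if err != nil {
		klog.ErrorS(err, "Unable to list pods on node", "node", node.Name)
		return
	}
	for _, pod := range pods {
		if h.Evictor().NodeLimitExceeded(node) {
			return // per-node eviction limit reached
		}
		if !h.Evictor().Filter(pod) {
			continue // the evictor filter says this pod must not be evicted
		}
		h.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
	}
}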


@@ -3,14 +3,15 @@ package descheduler
import (
"context"
"fmt"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/runtime"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/test"
@@ -27,6 +28,7 @@ func TestTaintsUpdated(t *testing.T) {
}
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{
"RemovePodsViolatingNodeTaints": api.DeschedulerStrategy{
@@ -35,24 +37,13 @@ func TestTaintsUpdated(t *testing.T) {
},
}
stopChannel := make(chan struct{})
defer close(stopChannel)
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.DeschedulingInterval = 100 * time.Millisecond
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1", stopChannel)
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
}()
rs.EventClient = eventClient
// Wait for few cycles and then verify the only pod still exists
time.Sleep(300 * time.Millisecond)
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Unable to list pods: %v", err)
@@ -74,24 +65,154 @@ func TestTaintsUpdated(t *testing.T) {
t.Fatalf("Unable to update node: %v\n", err)
}
if err := wait.PollImmediate(100*time.Millisecond, time.Second, func() (bool, error) {
// Get over evicted pod result in panic
//pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
// List is better, it does not panic.
// Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err == nil {
if len(pods.Items) > 0 {
return false, nil
}
return true, nil
}
if strings.Contains(err.Error(), "can't assign or convert v1beta1.Eviction into v1.Pod") {
return true, nil
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
return false, nil
}); err != nil {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies")
if err := RunDeschedulerStrategies(ctx, rs, dp, "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
if len(evictedPods) != 1 {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
}
}
func TestDuplicate(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p1.Namespace = "dev"
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p2.Namespace = "dev"
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p3.Namespace = "dev"
ownerRef1 := test.GetReplicaSetOwnerRefList()
p1.ObjectMeta.OwnerReferences = ownerRef1
p2.ObjectMeta.OwnerReferences = ownerRef1
p3.ObjectMeta.OwnerReferences = ownerRef1
client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{
"RemoveDuplicates": api.DeschedulerStrategy{
Enabled: true,
},
},
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Unable to list pods: %v", err)
}
if len(pods.Items) != 3 {
t.Errorf("Pods number should be 3 before evict")
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
if err := RunDeschedulerStrategies(ctx, rs, dp, "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
if len(evictedPods) == 0 {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
}
}
func TestRootCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
client := fakeclientset.NewSimpleClientset(n1, n2)
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{}, // no strategies needed for this test
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 100 * time.Millisecond
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
cancel()
select {
case err := <-errChan:
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
t.Fatal("Root ctx should have canceled immediately")
}
}
func TestRootCancelWithNoInterval(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
client := fakeclientset.NewSimpleClientset(n1, n2)
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{}, // no strategies needed for this test
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 0
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
cancel()
select {
case err := <-errChan:
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
t.Fatal("Root ctx should have canceled immediately")
}
}
func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
*evictedPods = append(*evictedPods, eviction.GetName())
}
}
return false, nil, nil // fallback to the default reactor
}
}


@@ -19,115 +19,158 @@ package evictions
import (
"context"
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/events"
"k8s.io/klog/v2"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/metrics"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
)
const (
evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
)
// nodePodEvictedCount keeps count of pods evicted on node
type nodePodEvictedCount map[*v1.Node]int
type nodePodEvictedCount map[string]uint
type namespacePodEvictCount map[string]uint
type PodEvictor struct {
client clientset.Interface
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode int
nodepodCount nodePodEvictedCount
evictLocalStoragePods bool
client clientset.Interface
nodes []*v1.Node
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
nodepodCount nodePodEvictedCount
namespacePodCount namespacePodEvictCount
metricsEnabled bool
eventRecorder events.EventRecorder
}
func NewPodEvictor(
client clientset.Interface,
policyGroupVersion string,
dryRun bool,
maxPodsToEvictPerNode int,
maxPodsToEvictPerNode *uint,
maxPodsToEvictPerNamespace *uint,
nodes []*v1.Node,
evictLocalStoragePods bool,
metricsEnabled bool,
eventRecorder events.EventRecorder,
) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount)
var namespacePodCount = make(namespacePodEvictCount)
for _, node := range nodes {
// Initialize the evicted-pods count for each node to 0.
nodePodCount[node] = 0
nodePodCount[node.Name] = 0
}
return &PodEvictor{
client: client,
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
nodepodCount: nodePodCount,
evictLocalStoragePods: evictLocalStoragePods,
client: client,
nodes: nodes,
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
nodepodCount: nodePodCount,
namespacePodCount: namespacePodCount,
metricsEnabled: metricsEnabled,
eventRecorder: eventRecorder,
}
}
// NodeEvicted gives a number of pods evicted for node
func (pe *PodEvictor) NodeEvicted(node *v1.Node) int {
return pe.nodepodCount[node]
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
return pe.nodepodCount[node.Name]
}
// TotalEvicted gives a number of pods evicted through all nodes
func (pe *PodEvictor) TotalEvicted() int {
var total int
func (pe *PodEvictor) TotalEvicted() uint {
var total uint
for _, count := range pe.nodepodCount {
total += count
}
return total
}
// EvictPod returns non-nil error only when evicting a pod on a node is not
// possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod
// is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, reasons ...string) (bool, error) {
var reason string
if len(reasons) > 0 {
reason = " (" + strings.Join(reasons, ", ") + ")"
// NodeLimitExceeded checks if the number of evictions for a node was exceeded
func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
if pe.maxPodsToEvictPerNode != nil {
return pe.nodepodCount[node.Name] == *pe.maxPodsToEvictPerNode
}
if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode {
return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name)
}
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
if err != nil {
// err is used only for logging purposes
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
return false, nil
}
pe.nodepodCount[node]++
if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
} else {
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartStructuredLogging(3)
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
}
return true, nil
return false
}
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) error {
if dryRun {
return nil
// EvictOptions provides a handle for passing additional info to EvictPod
type EvictOptions struct {
// Reason allows for passing details about the specific eviction for logging.
Reason string
}
// EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
strategy := ""
if ctx.Value("strategyName") != nil {
strategy = ctx.Value("strategyName").(string)
}
if pod.Spec.NodeName != "" {
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
return false
}
}
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
return false
}
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
if err != nil {
// err is used only for logging purposes
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
return false
}
if pod.Spec.NodeName != "" {
pe.nodepodCount[pod.Spec.NodeName]++
}
pe.namespacePodCount[pod.Namespace]++
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
} else {
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
reason := opts.Reason
if len(reason) == 0 {
reason = strategy
if len(reason) == 0 {
reason = "NotSet"
}
}
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
}
return true
}
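A minimal usage sketch (not part of this change) of the reworked eviction API: a caller checks NodeLimitExceeded before requesting each eviction and passes the reason through EvictOptions. The evictCandidates helper, its package name, and the reason string are illustrative, not code from this repository.
package example
import (
	"context"
	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)
// evictCandidates asks the PodEvictor to evict each candidate pod, stopping
// early once the per-node eviction limit has been reached.
func evictCandidates(ctx context.Context, podEvictor *evictions.PodEvictor, node *v1.Node, candidates []*v1.Pod) uint {
	for _, pod := range candidates {
		if podEvictor.NodeLimitExceeded(node) {
			break
		}
		// EvictPod returns true only when the eviction succeeded server side.
		podEvictor.EvictPod(ctx, pod, evictions.EvictOptions{Reason: "duplicate pod"})
	}
	return podEvictor.TotalEvicted()
}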
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
deleteOptions := &metav1.DeleteOptions{}
// GracePeriodSeconds ?
eviction := &policy.Eviction{
@@ -141,7 +184,7 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
},
DeleteOptions: deleteOptions,
}
err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction)
err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
if apierrors.IsTooManyRequests(err) {
return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
@@ -151,123 +194,3 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
}
return err
}
type Options struct {
priority *int32
}
// WithPriorityThreshold sets a threshold for pod's priority class.
// Any pod whose priority class is lower is evictable.
func WithPriorityThreshold(priority int32) func(opts *Options) {
return func(opts *Options) {
var p int32 = priority
opts.priority = &p
}
}
type constraint func(pod *v1.Pod) error
type evictable struct {
constraints []constraint
}
// Evictable provides an implementation of IsEvictable(IsEvictable(pod *v1.Pod) bool).
// The method accepts a list of options which allow to extend constraints
// which decides when a pod is considered evictable.
func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
options := &Options{}
for _, opt := range opts {
opt(options)
}
ev := &evictable{}
if !pe.evictLocalStoragePods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if IsPodWithLocalStorage(pod) {
return fmt.Errorf("pod has local storage and descheduler is not configured with --evict-local-storage-pods")
}
return nil
})
}
if options.priority != nil {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if IsPodEvictableBasedOnPriority(pod, *options.priority) {
return nil
}
return fmt.Errorf("pod has higher priority than specified priority class threshold")
})
}
return ev
}
// IsEvictable decides when a pod is evictable
func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
checkErrs := []error{}
if IsCriticalPod(pod) {
checkErrs = append(checkErrs, fmt.Errorf("pod is critical"))
}
ownerRefList := podutil.OwnerRef(pod)
if IsDaemonsetPod(ownerRefList) {
checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
}
if len(ownerRefList) == 0 {
checkErrs = append(checkErrs, fmt.Errorf("pod does not have any ownerrefs"))
}
if IsMirrorPod(pod) {
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
}
for _, c := range ev.constraints {
if err := c(pod); err != nil {
checkErrs = append(checkErrs, err)
}
}
if len(checkErrs) > 0 && !HaveEvictAnnotation(pod) {
klog.V(4).InfoS("Pod lacks an eviction annotation and fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
return false
}
return true
}
func IsCriticalPod(pod *v1.Pod) bool {
return utils.IsCriticalPod(pod)
}
func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
for _, ownerRef := range ownerRefList {
if ownerRef.Kind == "DaemonSet" {
return true
}
}
return false
}
// IsMirrorPod checks whether the pod is a mirror pod.
func IsMirrorPod(pod *v1.Pod) bool {
return utils.IsMirrorPod(pod)
}
// HaveEvictAnnotation checks if the pod have evict annotation
func HaveEvictAnnotation(pod *v1.Pod) bool {
_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
return found
}
func IsPodWithLocalStorage(pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if volume.HostPath != nil || volume.EmptyDir != nil {
return true
}
}
return false
}
// IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
func IsPodEvictableBasedOnPriority(pod *v1.Pod, priority int32) bool {
return pod.Spec.Priority == nil || *pod.Spec.Priority < priority
}


@@ -62,202 +62,13 @@ func TestEvictPod(t *testing.T) {
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: test.pods}, nil
})
got := evictPod(ctx, fakeClient, test.pod, "v1", false)
got := evictPod(ctx, fakeClient, test.pod, "v1")
if got != test.want {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
}
}
}
func TestIsEvictable(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
lowPriority := int32(800)
highPriority := int32(900)
type testCase struct {
pod *v1.Pod
runBefore func(*v1.Pod)
evictLocalStoragePods bool
priorityThreshold *int32
result bool
}
testCases := []testCase{
{
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
},
evictLocalStoragePods: true,
result: true,
}, {
pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Annotations = test.GetMirrorPodAnnotation()
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Annotations = test.GetMirrorPodAnnotation()
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
},
evictLocalStoragePods: false,
result: false,
}, {
pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
pod.Annotations = map[string]string{
"descheduler.alpha.kubernetes.io/evict": "true",
}
},
evictLocalStoragePods: false,
result: true,
}, {
pod: test.BuildTestPod("p14", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Priority = &highPriority
},
evictLocalStoragePods: false,
priorityThreshold: &lowPriority,
result: false,
}, {
pod: test.BuildTestPod("p15", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.Spec.Priority = &highPriority
},
evictLocalStoragePods: false,
priorityThreshold: &lowPriority,
result: true,
},
}
for _, test := range testCases {
test.runBefore(test.pod)
podEvictor := &PodEvictor{
evictLocalStoragePods: test.evictLocalStoragePods,
}
evictable := podEvictor.Evictable()
if test.priorityThreshold != nil {
evictable = podEvictor.Evictable(WithPriorityThreshold(*test.priorityThreshold))
}
result := evictable.IsEvictable(test.pod)
if result != test.result {
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pod.Name, test.result, result)
}
}
}
func TestPodTypes(t *testing.T) {
n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
@@ -286,18 +97,18 @@ func TestPodTypes(t *testing.T) {
}
// A Mirror Pod.
p4.Annotations = test.GetMirrorPodAnnotation()
if !IsMirrorPod(p4) {
if !utils.IsMirrorPod(p4) {
t.Errorf("Expected p4 to be a mirror pod.")
}
if !IsPodWithLocalStorage(p3) {
if !utils.IsPodWithLocalStorage(p3) {
t.Errorf("Expected p3 to be a pod with local storage.")
}
ownerRefList := podutil.OwnerRef(p2)
if !IsDaemonsetPod(ownerRefList) {
if !utils.IsDaemonsetPod(ownerRefList) {
t.Errorf("Expected p2 to be a daemonset pod.")
}
ownerRefList = podutil.OwnerRef(p1)
if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
if utils.IsDaemonsetPod(ownerRefList) || utils.IsPodWithLocalStorage(p1) || utils.IsCriticalPriorityPod(p1) || utils.IsMirrorPod(p1) || utils.IsStaticPod(p1) {
t.Errorf("Expected p1 to be a normal pod.")
}


@@ -0,0 +1,99 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package descheduler
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2"
"os"
)
// NewLeaderElection starts the leader election code loop
func NewLeaderElection(
run func() error,
client clientset.Interface,
LeaderElectionConfig *componentbaseconfig.LeaderElectionConfiguration,
ctx context.Context,
) error {
var id string
if hostname, err := os.Hostname(); err != nil {
// on errors, make sure we're unique
id = string(uuid.NewUUID())
} else {
// add a uniquifier so that two processes on the same host don't accidentally both become active
id = hostname + "_" + string(uuid.NewUUID())
}
klog.V(3).Infof("Assigned unique lease holder id: %s", id)
if len(LeaderElectionConfig.ResourceNamespace) == 0 {
return fmt.Errorf("namespace may not be empty")
}
if len(LeaderElectionConfig.ResourceName) == 0 {
return fmt.Errorf("name may not be empty")
}
lock, err := resourcelock.New(
LeaderElectionConfig.ResourceLock,
LeaderElectionConfig.ResourceNamespace,
LeaderElectionConfig.ResourceName,
client.CoreV1(),
client.CoordinationV1(),
resourcelock.ResourceLockConfig{
Identity: id,
},
)
if err != nil {
return fmt.Errorf("unable to create leader election lock: %v", err)
}
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: lock,
ReleaseOnCancel: true,
LeaseDuration: LeaderElectionConfig.LeaseDuration.Duration,
RenewDeadline: LeaderElectionConfig.RenewDeadline.Duration,
RetryPeriod: LeaderElectionConfig.RetryPeriod.Duration,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
klog.V(1).InfoS("Started leading")
err := run()
if err != nil {
klog.Error(err)
}
},
OnStoppedLeading: func() {
klog.V(1).InfoS("Leader lost")
},
OnNewLeader: func(identity string) {
// Just got the lock
if identity == id {
return
}
klog.V(1).Infof("New leader elected: %v", identity)
},
},
})
return nil
}
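A hedged sketch (not part of this change) of wiring NewLeaderElection into a caller. The lock name, namespace, and durations are illustrative placeholder values, not the project's defaults.
package example
import (
	"context"
	"time"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	componentbaseconfig "k8s.io/component-base/config"
	"sigs.k8s.io/descheduler/pkg/descheduler"
)
// runWithLeaderElection runs the supplied function only while this process
// holds the lease; it blocks until the context is cancelled.
func runWithLeaderElection(ctx context.Context, client clientset.Interface, run func() error) error {
	leaderElectionConfig := &componentbaseconfig.LeaderElectionConfiguration{
		ResourceLock:      "leases",
		ResourceNamespace: "kube-system",
		ResourceName:      "descheduler",
		LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
		RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
		RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
	}
	return descheduler.NewLeaderElection(run, client, leaderElectionConfig, ctx)
}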


@@ -18,19 +18,22 @@ package node
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
// ReadyNodes returns ready nodes irrespective of whether they are
// schedulable or not.
func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string) ([]*v1.Node, error) {
ns, err := labels.Parse(nodeSelector)
if err != nil {
return []*v1.Node{}, err
@@ -96,45 +99,192 @@ func IsReady(node *v1.Node) bool {
return true
}
// IsNodeUnschedulable checks if the node is unschedulable. This is helper function to check only in case of
// underutilized node so that they won't be accounted for.
func IsNodeUnschedulable(node *v1.Node) bool {
return node.Spec.Unschedulable
// NodeFit returns true if the provided pod can be scheduled onto the provided node.
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
// deciding if a pod would fit on a node, but more predicates may be added in the future.
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) []error {
// Check node selector and required affinity
var errors []error
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
errors = append(errors, err)
} else if !ok {
errors = append(errors, fmt.Errorf("pod node selector does not match the node label"))
}
// Check taints (we only care about NoSchedule and NoExecute taints)
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
})
if !ok {
errors = append(errors, fmt.Errorf("pod does not tolerate taints on the node"))
}
// Check if the pod can fit on the node based on its resource requests
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
if !ok {
errors = append(errors, reqErrors...)
}
}
// Check if node is schedulable
if IsNodeUnschedulable(node) {
errors = append(errors, fmt.Errorf("node is not schedulable"))
}
return errors
}
// PodFitsAnyNode checks if the given pod fits any of the given nodes, based on
// multiple criteria, like, pod node selector matching the node label, node
// being schedulable or not.
func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
// PodFitsAnyOtherNode checks if the given pod will fit any of the given nodes, besides the node
// the pod is already running on. The predicates used to determine if the pod will fit can be found in the NodeFit function.
func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
for _, node := range nodes {
ok, err := utils.PodMatchNodeSelector(pod, node)
if err != nil || !ok {
// Skip node pod is already on
if node.Name == pod.Spec.NodeName {
continue
}
if !IsNodeUnschedulable(node) {
klog.V(2).InfoS("Pod can possibly be scheduled on a different node", "pod", klog.KObj(pod), "node", klog.KObj(node))
errors := NodeFit(nodeIndexer, pod, node)
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
}
return false
}
// PodFitsCurrentNode checks if the given pod fits on the given node if the pod
// node selector matches the node label.
func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
ok, err := utils.PodMatchNodeSelector(pod, node)
if err != nil {
klog.ErrorS(err, "Failed to match node selector")
return false
// PodFitsAnyNode checks if the given pod will fit any of the given nodes. The predicates used
// to determine if the pod will fit can be found in the NodeFit function.
func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
for _, node := range nodes {
errors := NodeFit(nodeIndexer, pod, node)
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
}
return false
}
// PodFitsCurrentNode checks if the given pod will fit onto the given node. The predicates used
// to determine if the pod will fit can be found in the NodeFit function.
func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) bool {
errors := NodeFit(nodeIndexer, pod, node)
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
return false
}
// IsNodeUnschedulable checks if the node is unschedulable. This is a helper function used when checking
// underutilized nodes so that unschedulable nodes won't be accounted for.
func IsNodeUnschedulable(node *v1.Node) bool {
return node.Spec.Unschedulable
}
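A rough sketch (not part of this change) of how these fit checks are meant to be fed a GetPodsAssignedToNodeFunc built from a shared pod informer; the canMoveElsewhere helper and its package name are illustrative.
package example
import (
	"context"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)
// canMoveElsewhere reports whether the pod would fit on some node other than
// the one it is currently bound to.
func canMoveElsewhere(ctx context.Context, client clientset.Interface, pod *v1.Pod, nodes []*v1.Node) (bool, error) {
	factory := informers.NewSharedInformerFactory(client, 0)
	// The indexer-backed lookup replaces per-node list calls against the API server.
	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(factory.Core().V1().Pods())
	if err != nil {
		return false, err
	}
	factory.Start(ctx.Done())
	factory.WaitForCacheSync(ctx.Done())
	return nodeutil.PodFitsAnyOtherNode(getPodsAssignedToNode, pod, nodes), nil
}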
// fitsRequest determines if a pod can fit on a node based on its resource requests. It returns true if
// the pod will fit.
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, []error) {
var insufficientResources []error
// Get pod requests
podRequests, _ := utils.PodRequestsAndLimits(pod)
resourceNames := make([]v1.ResourceName, 0, len(podRequests))
for name := range podRequests {
resourceNames = append(resourceNames, name)
}
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames)
if err != nil {
return false, []error{err}
}
podFitsOnNode := true
for _, resource := range resourceNames {
podResourceRequest := podRequests[resource]
availableResource, ok := availableResources[resource]
if !ok || podResourceRequest.MilliValue() > availableResource.MilliValue() {
insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", resource))
podFitsOnNode = false
}
}
return podFitsOnNode, insufficientResources
}
// nodeAvailableResources returns resources mapped to the quantity available on the node.
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName) (map[v1.ResourceName]*resource.Quantity, error) {
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
if err != nil {
return nil, err
}
nodeUtilization := NodeUtilization(podsOnNode, resourceNames)
remainingResources := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
}
for _, name := range resourceNames {
if !IsBasicResource(name) {
if _, exists := node.Status.Allocatable[name]; exists {
allocatableResource := node.Status.Allocatable[name]
remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI)
} else {
remainingResources[name] = resource.NewQuantity(0, resource.DecimalSI)
}
}
}
return remainingResources, nil
}
// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
totalReqs := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
}
for _, name := range resourceNames {
if !IsBasicResource(name) {
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
}
}
for _, pod := range pods {
req, _ := utils.PodRequestsAndLimits(pod)
for _, name := range resourceNames {
quantity, ok := req[name]
if ok && name != v1.ResourcePods {
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
totalReqs[name].Add(quantity)
}
}
}
return totalReqs
}
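As a rough worked example (not part of this change): for two pods requesting 500m and 1500m of CPU, NodeUtilization reports cpu=2000m and pods=2, and the remaining headroom is allocatable minus that sum, which is what nodeAvailableResources computes per resource name. A hedged sketch of the same arithmetic from outside the package; the remainingCPUMilli helper is illustrative.
package example
import (
	v1 "k8s.io/api/core/v1"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
)
// remainingCPUMilli returns how many milli-CPUs are still unrequested on the
// node, mirroring the allocatable-minus-utilization calculation above.
func remainingCPUMilli(node *v1.Node, podsOnNode []*v1.Pod) int64 {
	requested := nodeutil.NodeUtilization(podsOnNode, []v1.ResourceName{v1.ResourceCPU})
	return node.Status.Allocatable.Cpu().MilliValue() - requested[v1.ResourceCPU].MilliValue()
}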
// IsBasicResource checks if resource is basic native.
func IsBasicResource(name v1.ResourceName) bool {
switch name {
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
return true
default:
return false
}
if !ok {
klog.V(2).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return false
}
klog.V(2).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
}


@@ -21,9 +21,12 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
@@ -74,7 +77,7 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
sharedInformerFactory.WaitForCacheSync(stopChannel)
defer close(stopChannel)
nodes, _ := ReadyNodes(ctx, fakeClient, nodeInformer, nodeSelector, nil)
nodes, _ := ReadyNodes(ctx, fakeClient, nodeInformer, nodeSelector)
if nodes[0].Name != "node1" {
t.Errorf("Expected node1, got %s", nodes[0].Name)
@@ -147,13 +150,13 @@ func TestPodFitsCurrentNode(t *testing.T) {
},
},
},
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: nodeLabelValue,
},
},
},
node: test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
success: true,
},
{
@@ -181,217 +184,582 @@ func TestPodFitsCurrentNode(t *testing.T) {
},
},
},
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "no",
},
},
},
node: test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "no",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
success: false,
},
}
for _, tc := range tests {
actual := PodFitsCurrentNode(tc.pod, tc.node)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
objs = append(objs, tc.node)
objs = append(objs, tc.pod)
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
actual := PodFitsCurrentNode(getPodsAssignedToNode, tc.pod, tc.node)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
})
}
}
func TestPodFitsAnyNode(t *testing.T) {
func TestPodFitsAnyOtherNode(t *testing.T) {
nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
nodeTaintKey := "hardware"
nodeTaintValue := "gpu"
// Staging node has no scheduling restrictions, but the pod always starts here and PodFitsAnyOtherNode() doesn't take into account the node the pod is running on.
nodeNames := []string{"node1", "node2", "stagingNode"}
tests := []struct {
description string
pod *v1.Pod
nodes []*v1.Node
success bool
podsOnNodes []*v1.Pod
}{
{
description: "Pod expected to fit one of the nodes",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
},
},
},
description: "Pod fits another node matching node affinity",
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: nodeLabelValue,
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "no",
},
},
},
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "no",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
success: true,
podsOnNodes: []*v1.Pod{},
success: true,
},
{
description: "Pod expected to fit one of the nodes (error node first)",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
},
},
},
description: "Pod expected to fit one of the nodes",
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "no",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: nodeLabelValue,
},
},
},
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "no",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
success: true,
podsOnNodes: []*v1.Pod{},
success: true,
},
{
description: "Pod expected to fit none of the nodes",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
},
},
},
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "unfit1",
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "unfit1",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "unfit2",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{},
success: false,
},
{
description: "Nodes are unschedulable but labels match, should fail",
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Unschedulable = true
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "no",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{},
success: false,
},
{
description: "Both nodes are tained, should fail",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: "unfit2",
}
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
},
},
}
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{},
success: false,
},
{
description: "Two nodes matches node selector, one of them is tained, there is a pod on the available node, and requests are low, should pass",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("test-pod", 12*1000, 20*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(40*1000*1000*1000, resource.DecimalSI)
}),
},
success: true,
},
{
description: "Two nodes matches node selector, one of them is tained, but CPU requests are too big, should fail",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
// Notice that this node only has 4 cores, the pod already on the node below requests 3 cores, and the pod above requests 2 cores
test.BuildTestNode(nodeNames[1], 4000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("3-core-pod", 3000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
},
success: false,
},
{
description: "Nodes are unschedulable but labels match, should fail",
pod: &v1.Pod{
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
},
},
},
description: "Two nodes matches node selector, one of them is tained, but memory requests are too big, should fail",
pod: test.BuildTestPod("p1", 2000, 5*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
nodeLabelKey: nodeLabelValue,
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
},
Spec: v1.NodeSpec{
Unschedulable: true,
},
},
{
ObjectMeta: metav1.ObjectMeta{
}
}),
// Notice that this node only has 8GB of memory, the pod already on the node below requests 4GB, and the pod above requests 5GB
test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("4GB-mem-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
nodeLabelKey: "no",
"test": "true",
},
},
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
},
success: false,
},
{
description: "Two nodes matches node selector, one of them is tained, but ephemeral storage requests are too big, should fail",
pod: test.BuildTestPod("p1", 2000, 4*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
// Notice that this node only has 20GB of storage, the pod already on the node below requests 11GB, and the pod above requests 10GB
test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(20*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("11GB-storage-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(11*1000*1000*1000, resource.DecimalSI)
}),
},
success: false,
},
{
description: "Two nodes matches node selector, one of them is tained, but custom resource requests are too big, should fail",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
pod.Spec.Containers[0].Resources.Requests["example.com/custom-resource"] = *resource.NewQuantity(10, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
node.Status.Allocatable["example.com/custom-resource"] = *resource.NewQuantity(15, resource.DecimalSI)
}),
// Notice that this node only has 15 of the custom resource, the pod already on the node below requests 10, and the pod above requests 10
test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
node.Status.Allocatable["example.com/custom-resource"] = *resource.NewQuantity(15, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("10-custom-resource-pod", 0, 0, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests["example.com/custom-resource"] = *resource.NewQuantity(10, resource.DecimalSI)
}),
},
success: false,
},
{
description: "Two nodes matches node selector, one of them is tained, CPU requests will fit, and pod Overhead is low enough, should pass",
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
// Notice that this node has 5 CPU cores; the pod below requests 2 cores and has a CPU overhead of 1 core, and the pod above requests 1 core
test.BuildTestNode(nodeNames[1], 5000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
}),
},
success: true,
},
{
description: "Two nodes matches node selector, one of them is tained, CPU requests will fit, but pod Overhead is too high, should fail",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
// Notice that this node only has 5 CPU cores, the pod below requests 2 cores, but has CPU overhead of 2 cores, and the pod above requests 2 cores
test.BuildTestNode(nodeNames[1], 5000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
pod.Spec.Overhead = createResourceList(2000, 1000*1000*1000, 1000*1000*1000)
}),
},
success: false,
},
}
for _, tc := range tests {
actual := PodFitsAnyNode(tc.pod, tc.nodes)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.podsOnNodes {
objs = append(objs, pod)
}
objs = append(objs, tc.pod)
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
actual := PodFitsAnyOtherNode(getPodsAssignedToNode, tc.pod, tc.nodes)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
})
}
}
// createResourceList builds a small resource list of core resources
func createResourceList(cpu int64, memory int64, ephemeralStorage int64) v1.ResourceList {
resourceList := make(map[v1.ResourceName]resource.Quantity)
resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
resourceList[v1.ResourceEphemeralStorage] = *resource.NewQuantity(ephemeralStorage, resource.DecimalSI)
return resourceList
}


@@ -17,116 +17,174 @@ limitations under the License.
package pod
import (
"context"
"sort"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/descheduler/pkg/utils"
)
const (
nodeNameKeyIndex = "spec.nodeName"
)
// FilterFunc is a filter for a pod.
type FilterFunc func(*v1.Pod) bool
// GetPodsAssignedToNodeFunc is a function which accepts a node name and a pod filter function
// as input and returns the pods assigned to that node.
type GetPodsAssignedToNodeFunc func(string, FilterFunc) ([]*v1.Pod, error)
// WrapFilterFuncs wraps a set of FilterFunc in one.
func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
return func(pod *v1.Pod) bool {
for _, filter := range filters {
if filter != nil && !filter(pod) {
return false
}
}
return true
}
}
type Options struct {
filter func(pod *v1.Pod) bool
includedNamespaces []string
excludedNamespaces []string
filter FilterFunc
includedNamespaces sets.String
excludedNamespaces sets.String
labelSelector *metav1.LabelSelector
}
// NewOptions returns an empty Options.
func NewOptions() *Options {
return &Options{}
}
// WithFilter sets a pod filter.
// The filter function should return true if the pod should be returned from ListPodsOnANode
func WithFilter(filter func(pod *v1.Pod) bool) func(opts *Options) {
return func(opts *Options) {
opts.filter = filter
}
func (o *Options) WithFilter(filter FilterFunc) *Options {
o.filter = filter
return o
}
// WithNamespaces sets included namespaces
func WithNamespaces(namespaces []string) func(opts *Options) {
return func(opts *Options) {
opts.includedNamespaces = namespaces
}
func (o *Options) WithNamespaces(namespaces sets.String) *Options {
o.includedNamespaces = namespaces
return o
}
// WithoutNamespaces sets excluded namespaces
func WithoutNamespaces(namespaces []string) func(opts *Options) {
return func(opts *Options) {
opts.excludedNamespaces = namespaces
}
func (o *Options) WithoutNamespaces(namespaces sets.String) *Options {
o.excludedNamespaces = namespaces
return o
}
// ListPodsOnANode lists all of the pods on a node
// It also accepts an optional "filter" function which can be used to further limit the pods that are returned.
// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
// be used by strategies to extend it if there are further restrictions, such as with NodeAffinity).
func ListPodsOnANode(
ctx context.Context,
client clientset.Interface,
node *v1.Node,
opts ...func(opts *Options),
) ([]*v1.Pod, error) {
options := &Options{}
for _, opt := range opts {
opt(options)
// WithLabelSelector sets a pod label selector
func (o *Options) WithLabelSelector(labelSelector *metav1.LabelSelector) *Options {
o.labelSelector = labelSelector
return o
}
// BuildFilterFunc builds a final FilterFunc based on Options.
func (o *Options) BuildFilterFunc() (FilterFunc, error) {
var s labels.Selector
var err error
if o.labelSelector != nil {
s, err = metav1.LabelSelectorAsSelector(o.labelSelector)
if err != nil {
return nil, err
}
}
return func(pod *v1.Pod) bool {
if o.filter != nil && !o.filter(pod) {
return false
}
if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) {
return false
}
if len(o.excludedNamespaces) > 0 && o.excludedNamespaces.Has(pod.Namespace) {
return false
}
if s != nil && !s.Matches(labels.Set(pod.GetLabels())) {
return false
}
return true
}, nil
}
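As a hedged usage sketch (assuming the same package and imports as the file above), the chained setters compose into a single FilterFunc; the namespaces and label selector below are made-up values, not taken from the diff.
// buildExampleFilter shows how the Options builder and BuildFilterFunc compose.
// All concrete values here are purely illustrative.
func buildExampleFilter() (FilterFunc, error) {
return NewOptions().
WithNamespaces(sets.NewString("dev")).
WithoutNamespaces(sets.NewString("kube-system")).
WithLabelSelector(&metav1.LabelSelector{
MatchLabels: map[string]string{"app": "web"},
}).
BuildFilterFunc()
}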
// BuildGetPodsAssignedToNodeFunc establishes an indexer to map the pods and their assigned nodes.
// It returns a function to help us get all the pods assigned to a node based on the indexer.
func BuildGetPodsAssignedToNodeFunc(podInformer coreinformers.PodInformer) (GetPodsAssignedToNodeFunc, error) {
// Establish an indexer to map the pods and their assigned nodes.
err := podInformer.Informer().AddIndexers(cache.Indexers{
nodeNameKeyIndex: func(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return []string{}, nil
}
if len(pod.Spec.NodeName) == 0 {
return []string{}, nil
}
return []string{pod.Spec.NodeName}, nil
},
})
if err != nil {
return nil, err
}
pods := make([]*v1.Pod, 0)
fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)
if len(options.includedNamespaces) > 0 {
fieldSelector, err := fields.ParseSelector(fieldSelectorString)
// The indexer helps us get all the pods assigned to a node.
podIndexer := podInformer.Informer().GetIndexer()
getPodsAssignedToNode := func(nodeName string, filter FilterFunc) ([]*v1.Pod, error) {
objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName)
if err != nil {
return []*v1.Pod{}, err
return nil, err
}
for _, namespace := range options.includedNamespaces {
podList, err := client.CoreV1().Pods(namespace).List(ctx,
metav1.ListOptions{FieldSelector: fieldSelector.String()})
if err != nil {
return []*v1.Pod{}, err
pods := make([]*v1.Pod, 0, len(objs))
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
}
for i := range podList.Items {
if options.filter != nil && !options.filter(&podList.Items[i]) {
continue
}
pods = append(pods, &podList.Items[i])
if filter(pod) {
pods = append(pods, pod)
}
}
return pods, nil
}
return getPodsAssignedToNode, nil
}
if len(options.excludedNamespaces) > 0 {
for _, namespace := range options.excludedNamespaces {
fieldSelectorString += ",metadata.namespace!=" + namespace
}
// ListPodsOnANode lists all pods on a node.
// It also accepts a "filter" function which can be used to further limit the pods that are returned.
// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
// be used by strategies to extend it if there are further restrictions, such as with NodeAffinity).
func ListPodsOnANode(
nodeName string,
getPodsAssignedToNode GetPodsAssignedToNodeFunc,
filter FilterFunc,
) ([]*v1.Pod, error) {
// Succeeded and failed pods are not considered because they don't occupy any resource.
f := func(pod *v1.Pod) bool {
return pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed
}
return ListAllPodsOnANode(nodeName, getPodsAssignedToNode, WrapFilterFuncs(f, filter))
}
fieldSelector, err := fields.ParseSelector(fieldSelectorString)
// ListAllPodsOnANode lists all the pods on a node no matter what the phase of the pod is.
func ListAllPodsOnANode(
nodeName string,
getPodsAssignedToNode GetPodsAssignedToNodeFunc,
filter FilterFunc,
) ([]*v1.Pod, error) {
pods, err := getPodsAssignedToNode(nodeName, filter)
if err != nil {
return []*v1.Pod{}, err
}
// INFO(jchaloup): field selectors do not work properly with listers.
// Once the descheduler switches to pod listers (through informers),
// we need to flip to client-side filtering.
podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
metav1.ListOptions{FieldSelector: fieldSelector.String()})
if err != nil {
return []*v1.Pod{}, err
}
for i := range podList.Items {
// fake client does not support field selectors
// so let's filter based on the node name as well (quite cheap)
if podList.Items[i].Spec.NodeName != node.Name {
continue
}
if options.filter != nil && !options.filter(&podList.Items[i]) {
continue
}
pods = append(pods, &podList.Items[i])
}
return pods, nil
}
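A minimal sketch (same package, hypothetical wrapper name) of calling the indexer-based listing functions defined above:
// listEvictablePodsExample lists the evictable, still-running pods on one node.
// ListAllPodsOnANode would additionally include Succeeded and Failed pods.
func listEvictablePodsExample(nodeName string, getPodsAssignedToNode GetPodsAssignedToNodeFunc, isEvictable FilterFunc) ([]*v1.Pod, error) {
return ListPodsOnANode(nodeName, getPodsAssignedToNode, isEvictable)
}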
@@ -170,3 +228,10 @@ func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
return *pods[i].Spec.Priority < *pods[j].Spec.Priority
})
}
// SortPodsBasedOnAge sorts Pods from oldest to most recent in place
func SortPodsBasedOnAge(pods []*v1.Pod) {
sort.Slice(pods, func(i, j int) bool {
return pods[i].CreationTimestamp.Before(&pods[j].CreationTimestamp)
})
}
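The two in-place sort helpers are alternatives rather than a pipeline; a hedged sketch (hypothetical wrapper name) of choosing between them:
// orderCandidatesExample orders eviction candidates either oldest-first or
// lowest-priority-first, depending on what the calling strategy needs.
func orderCandidatesExample(pods []*v1.Pod, byAge bool) {
if byAge {
SortPodsBasedOnAge(pods) // oldest pods first
return
}
SortPodsBasedOnPriorityLowToHigh(pods) // lowest-priority pods first
}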

View File

@@ -20,13 +20,15 @@ import (
"context"
"fmt"
"reflect"
"strings"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/test"
)
@@ -38,39 +40,80 @@ var (
func TestListPodsOnANode(t *testing.T) {
testCases := []struct {
name string
pods map[string][]v1.Pod
pods []*v1.Pod
node *v1.Node
labelSelector *metav1.LabelSelector
expectedPodCount int
}{
{
name: "test listing pods on a node",
pods: map[string][]v1.Pod{
"n1": {
*test.BuildTestPod("pod1", 100, 0, "n1", nil),
*test.BuildTestPod("pod2", 100, 0, "n1", nil),
},
"n2": {*test.BuildTestPod("pod3", 100, 0, "n2", nil)},
pods: []*v1.Pod{
test.BuildTestPod("pod1", 100, 0, "n1", nil),
test.BuildTestPod("pod2", 100, 0, "n1", nil),
test.BuildTestPod("pod3", 100, 0, "n2", nil),
},
node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
labelSelector: nil,
expectedPodCount: 2,
},
{
name: "test listing pods with label selector",
pods: []*v1.Pod{
test.BuildTestPod("pod1", 100, 0, "n1", nil),
test.BuildTestPod("pod2", 100, 0, "n1", func(pod *v1.Pod) {
pod.Labels = map[string]string{"foo": "bar"}
}),
test.BuildTestPod("pod3", 100, 0, "n1", func(pod *v1.Pod) {
pod.Labels = map[string]string{"foo": "bar1"}
}),
test.BuildTestPod("pod4", 100, 0, "n2", nil),
},
node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
labelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "foo",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"bar", "bar1"},
},
},
},
expectedPodCount: 2,
},
}
for _, testCase := range testCases {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, "n1") {
return true, &v1.PodList{Items: testCase.pods["n1"]}, nil
} else if strings.Contains(fieldString, "n2") {
return true, &v1.PodList{Items: testCase.pods["n2"]}, nil
t.Run(testCase.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
objs = append(objs, testCase.node)
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
filter, err := NewOptions().WithLabelSelector(testCase.labelSelector).BuildFilterFunc()
if err != nil {
t.Errorf("Build filter function error: %v", err)
}
pods, _ := ListPodsOnANode(testCase.node.Name, getPodsAssignedToNode, filter)
if len(pods) != testCase.expectedPodCount {
t.Errorf("Expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node)
if len(pods) != testCase.expectedPodCount {
t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
}
}
}
@@ -113,3 +156,23 @@ func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
}
}
func TestSortPodsBasedOnAge(t *testing.T) {
podList := make([]*v1.Pod, 9)
n1 := test.BuildTestNode("n1", 4000, 3000, int64(len(podList)), nil)
for i := 0; i < len(podList); i++ {
podList[i] = test.BuildTestPod(fmt.Sprintf("p%d", i), 1, 32, n1.Name, func(pod *v1.Pod) {
creationTimestamp := metav1.Now().Add(time.Minute * time.Duration(-i))
pod.ObjectMeta.SetCreationTimestamp(metav1.NewTime(creationTimestamp))
})
}
SortPodsBasedOnAge(podList)
for i := 0; i < len(podList)-1; i++ {
if podList[i+1].CreationTimestamp.Before(&podList[i].CreationTimestamp) {
t.Errorf("Expected pods to be sorted by age but pod at index %d was older than %d", i+1, i)
}
}
}

View File

@@ -1,417 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
func buildTestPodWithImage(podName, node, image string) *v1.Pod {
pod := test.BuildTestPod(podName, 100, 0, node, test.SetRSOwnerRef)
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: image,
Image: image,
})
return pod
}
func TestFindDuplicatePods(t *testing.T) {
ctx := context.Background()
// first setup pods
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p1.Namespace = "dev"
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p2.Namespace = "dev"
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p3.Namespace = "dev"
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
p7.Namespace = "kube-system"
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
p8.Namespace = "test"
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
p9.Namespace = "test"
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
p10.Namespace = "test"
p11 := test.BuildTestPod("p11", 100, 0, node1.Name, nil)
p11.Namespace = "different-images"
p12 := test.BuildTestPod("p12", 100, 0, node1.Name, nil)
p12.Namespace = "different-images"
p13 := test.BuildTestPod("p13", 100, 0, node1.Name, nil)
p13.Namespace = "different-images"
p14 := test.BuildTestPod("p14", 100, 0, node1.Name, nil)
p14.Namespace = "different-images"
// ### Evictable Pods ###
// Three Pods in the "default" Namespace, bound to same ReplicaSet. 2 should be evicted.
ownerRef1 := test.GetReplicaSetOwnerRefList()
p1.ObjectMeta.OwnerReferences = ownerRef1
p2.ObjectMeta.OwnerReferences = ownerRef1
p3.ObjectMeta.OwnerReferences = ownerRef1
// Three Pods in the "test" Namespace, bound to same ReplicaSet. 2 should be evicted.
ownerRef2 := test.GetReplicaSetOwnerRefList()
p8.ObjectMeta.OwnerReferences = ownerRef2
p9.ObjectMeta.OwnerReferences = ownerRef2
p10.ObjectMeta.OwnerReferences = ownerRef2
// ### Non-evictable Pods ###
// A DaemonSet.
p4.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A Pod with local storage.
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p5.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p6.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
priority := utils.SystemCriticalPriority
p7.Spec.Priority = &priority
// Same owners, but different images
p11.Spec.Containers[0].Image = "foo"
p11.ObjectMeta.OwnerReferences = ownerRef1
p12.Spec.Containers[0].Image = "bar"
p12.ObjectMeta.OwnerReferences = ownerRef1
// Multiple containers
p13.ObjectMeta.OwnerReferences = ownerRef1
p13.Spec.Containers = append(p13.Spec.Containers, v1.Container{
Name: "foo",
Image: "foo",
})
testCases := []struct {
description string
maxPodsToEvictPerNode int
pods []v1.Pod
expectedEvictedPodCount int
strategy api.DeschedulerStrategy
}{
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
expectedEvictedPodCount: 1,
strategy: api.DeschedulerStrategy{},
},
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
},
{
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p8, *p9, *p10},
expectedEvictedPodCount: 1,
strategy: api.DeschedulerStrategy{},
},
{
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
expectedEvictedPodCount: 2,
strategy: api.DeschedulerStrategy{},
},
{
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
maxPodsToEvictPerNode: 2,
pods: []v1.Pod{*p4, *p5, *p6, *p7},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{},
},
{
description: "Test all Pods: 4 should be evicted.",
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
expectedEvictedPodCount: 2,
strategy: api.DeschedulerStrategy{},
},
{
description: "Pods with the same owner but different images should not be evicted",
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11, *p12},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{},
},
{
description: "Pods with multiple containers should not match themselves",
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p13},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{},
},
{
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11, *p13},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{},
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: testCase.pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
testCase.maxPodsToEvictPerNode,
[]*v1.Node{node1, node2},
false,
)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, []*v1.Node{node1, node2}, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
}
})
}
}
func TestRemoveDuplicatesUniformly(t *testing.T) {
ctx := context.Background()
setRSOwnerRef2 := func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"},
}
}
setTwoRSOwnerRef := func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-1"},
{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"},
}
}
testCases := []struct {
description string
maxPodsToEvictPerNode int
pods []v1.Pod
nodes []*v1.Node
expectedEvictedPodCount int
strategy api.DeschedulerStrategy
}{
{
description: "Evict pods uniformly",
pods: []v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
},
strategy: api.DeschedulerStrategy{},
},
{
description: "Evict pods uniformly with one node left out",
pods: []v1.Pod{
// (5,3,1) -> (4,4,1) -> 1 eviction
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
},
strategy: api.DeschedulerStrategy{},
},
{
description: "Evict pods uniformly with two replica sets",
pods: []v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
*test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
*test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
*test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
*test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
},
expectedEvictedPodCount: 4,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
},
strategy: api.DeschedulerStrategy{},
},
{
description: "Evict pods uniformly with two owner references",
pods: []v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
*test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
// (1,3,5) -> (3,3,3) -> 2 evictions
*test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
*test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
*test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
*test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
*test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
},
expectedEvictedPodCount: 4,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
},
strategy: api.DeschedulerStrategy{},
},
{
description: "Evict pods with number of pods less than nodes",
pods: []v1.Pod{
// (2,0,0) -> (1,1,0) -> 1 eviction
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
},
strategy: api.DeschedulerStrategy{},
},
{
description: "Evict pods with number of pods less than nodes, but ignore different pods with the same ownerref",
pods: []v1.Pod{
// (1, 0, 0) for "bar","baz" images -> no eviction, even with a matching ownerKey
// (2, 0, 0) for "foo" image -> (1,1,0) - 1 eviction
// In this case the only "real" duplicates are p1 and p4, so one of those should be evicted
*buildTestPodWithImage("p1", "n1", "foo"),
*buildTestPodWithImage("p2", "n1", "bar"),
*buildTestPodWithImage("p3", "n1", "baz"),
*buildTestPodWithImage("p4", "n1", "foo"),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
},
strategy: api.DeschedulerStrategy{},
},
{
description: "Evict pods with a single pod with three nodes",
pods: []v1.Pod{
// (1,0,0) -> no duplicates, so no eviction
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 0,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
},
strategy: api.DeschedulerStrategy{},
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: testCase.pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
testCase.maxPodsToEvictPerNode,
testCase.nodes,
false,
)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
}
})
}
}

View File

@@ -1,466 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"fmt"
"sort"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
// NodeUsage stores a node's info, pods on it, thresholds and its resource usage
type NodeUsage struct {
node *v1.Node
usage map[v1.ResourceName]*resource.Quantity
allPods []*v1.Pod
lowResourceThreshold map[v1.ResourceName]*resource.Quantity
highResourceThreshold map[v1.ResourceName]*resource.Quantity
}
// NodePodsMap is a set of (node, pods) pairs
type NodePodsMap map[*v1.Node][]*v1.Pod
const (
// MinResourcePercentage is the minimum value of a resource's percentage
MinResourcePercentage = 0
// MaxResourcePercentage is the maximum value of a resource's percentage
MaxResourcePercentage = 100
)
func validateLowNodeUtilizationParams(params *api.StrategyParameters) error {
if params == nil || params.NodeResourceUtilizationThresholds == nil {
return fmt.Errorf("NodeResourceUtilizationThresholds not set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
}
return nil
}
// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
// to calculate nodes' utilization and not the actual resource usage.
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
// TODO: Maybe create a struct for the strategy as well, so that we don't have to pass along all the params?
if err := validateLowNodeUtilizationParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
return
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
if err := validateStrategyConfig(thresholds, targetThresholds); err != nil {
klog.ErrorS(err, "LowNodeUtilization config is not valid")
return
}
// check if Pods/CPU/Mem are set, if not, set them to 100
if _, ok := thresholds[v1.ResourcePods]; !ok {
thresholds[v1.ResourcePods] = MaxResourcePercentage
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
}
if _, ok := thresholds[v1.ResourceCPU]; !ok {
thresholds[v1.ResourceCPU] = MaxResourcePercentage
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
}
lowNodes, targetNodes := classifyNodes(
getNodeUsage(ctx, client, nodes, thresholds, targetThresholds),
// The node has to be schedulable (to be able to move workload there)
func(node *v1.Node, usage NodeUsage) bool {
if nodeutil.IsNodeUnschedulable(node) {
klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
return false
}
return isNodeWithLowUtilization(usage)
},
func(node *v1.Node, usage NodeUsage) bool {
return isNodeAboveTargetUtilization(usage)
},
)
klog.V(1).InfoS("Criteria for a node under utilization",
"CPU", thresholds[v1.ResourceCPU], "Mem", thresholds[v1.ResourceMemory], "Pods", thresholds[v1.ResourcePods])
if len(lowNodes) == 0 {
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
return
}
klog.V(1).InfoS("Total number of underutilized nodes", "totalNumber", len(lowNodes))
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
return
}
if len(lowNodes) == len(nodes) {
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
return
}
if len(targetNodes) == 0 {
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
return
}
klog.V(1).InfoS("Criteria for a node above target utilization",
"CPU", targetThresholds[v1.ResourceCPU], "Mem", targetThresholds[v1.ResourceMemory], "Pods", targetThresholds[v1.ResourcePods])
klog.V(1).InfoS("Number of nodes above target utilization", "totalNumber", len(targetNodes))
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
evictPodsFromTargetNodes(
ctx,
targetNodes,
lowNodes,
podEvictor,
evictable.IsEvictable)
klog.V(1).InfoS("Total number of pods evicted", "evictedPods", podEvictor.TotalEvicted())
}
// validateStrategyConfig checks if the strategy's config is valid
func validateStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
// validate thresholds and targetThresholds config
if err := validateThresholds(thresholds); err != nil {
return fmt.Errorf("thresholds config is not valid: %v", err)
}
if err := validateThresholds(targetThresholds); err != nil {
return fmt.Errorf("targetThresholds config is not valid: %v", err)
}
// validate if thresholds and targetThresholds have same resources configured
if len(thresholds) != len(targetThresholds) {
return fmt.Errorf("thresholds and targetThresholds configured different resources")
}
for resourceName, value := range thresholds {
if targetValue, ok := targetThresholds[resourceName]; !ok {
return fmt.Errorf("thresholds and targetThresholds configured different resources")
} else if value > targetValue {
return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
}
}
return nil
}
// validateThresholds checks if thresholds have valid resource name and resource percentage configured
func validateThresholds(thresholds api.ResourceThresholds) error {
if thresholds == nil || len(thresholds) == 0 {
return fmt.Errorf("no resource threshold is configured")
}
for name, percent := range thresholds {
switch name {
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
if percent < MinResourcePercentage || percent > MaxResourcePercentage {
return fmt.Errorf("%v threshold not in [%v, %v] range", name, MinResourcePercentage, MaxResourcePercentage)
}
default:
return fmt.Errorf("only cpu, memory, or pods thresholds can be specified")
}
}
return nil
}
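A hedged sketch (same package, illustrative percentages) of a thresholds/targetThresholds pair that satisfies both validators above: the two maps name the same resources, every value sits in [0, 100], and no threshold exceeds its target.
func exampleThresholds() error {
thresholds := api.ResourceThresholds{
v1.ResourceCPU:    30,
v1.ResourceMemory: 30,
v1.ResourcePods:   30,
}
targetThresholds := api.ResourceThresholds{
v1.ResourceCPU:    50,
v1.ResourceMemory: 50,
v1.ResourcePods:   50,
}
return validateStrategyConfig(thresholds, targetThresholds)
}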
func getNodeUsage(
ctx context.Context,
client clientset.Interface,
nodes []*v1.Node,
lowThreshold, highThreshold api.ResourceThresholds,
) []NodeUsage {
nodeUsageList := []NodeUsage{}
for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(ctx, client, node)
if err != nil {
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
continue
}
nodeCapacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
nodeCapacity = node.Status.Allocatable
}
nodeUsageList = append(nodeUsageList, NodeUsage{
node: node,
usage: nodeUtilization(node, pods),
allPods: pods,
// A threshold is a percentage in the <0;100> interval.
// Multiplying it by 0.01 converts the <0;100> interval into <0;1>.
// Multiplying that by the capacity gives the fraction of the capacity corresponding to the given high/low resource threshold, in Quantity units.
lowResourceThreshold: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(lowThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
},
highResourceThreshold: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(highThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(int64(float64(highThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(float64(highThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
},
})
}
return nodeUsageList
}
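The percentage-to-Quantity conversion performed inside getNodeUsage reduces to simple arithmetic; a hedged, standalone sketch with made-up numbers:
// thresholdToQuantityExample: a 30% CPU threshold on a node with 4000m of
// allocatable CPU becomes a 1200m Quantity (30 * 4000 * 0.01 = 1200).
func thresholdToQuantityExample() *resource.Quantity {
thresholdPercent := 30.0
allocatableMilliCPU := int64(4000)
return resource.NewMilliQuantity(int64(thresholdPercent*float64(allocatableMilliCPU)*0.01), resource.DecimalSI)
}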
func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
nodeCapacity := nodeUsage.node.Status.Capacity
if len(nodeUsage.node.Status.Allocatable) > 0 {
nodeCapacity = nodeUsage.node.Status.Allocatable
}
resourceUsagePercentage := map[v1.ResourceName]float64{}
for resourceName, resourceUsage := range nodeUsage.usage {
cap := nodeCapacity[resourceName]
if !cap.IsZero() {
resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.Value()) / float64(cap.Value())
}
}
return resourceUsagePercentage
}
// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
// low and high thresholds, it is simply ignored.
func classifyNodes(
nodeUsages []NodeUsage,
lowThresholdFilter, highThresholdFilter func(node *v1.Node, usage NodeUsage) bool,
) ([]NodeUsage, []NodeUsage) {
lowNodes, highNodes := []NodeUsage{}, []NodeUsage{}
for _, nodeUsage := range nodeUsages {
if lowThresholdFilter(nodeUsage.node, nodeUsage) {
klog.V(2).InfoS("Node is underutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
lowNodes = append(lowNodes, nodeUsage)
} else if highThresholdFilter(nodeUsage.node, nodeUsage) {
klog.V(2).InfoS("Node is overutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
highNodes = append(highNodes, nodeUsage)
} else {
klog.V(2).InfoS("Node is appropriately utilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
}
}
return lowNodes, highNodes
}
// evictPodsFromTargetNodes evicts pods based on priority when all the pods on the node have a priority set;
// otherwise it falls back to evicting them based on QoS class.
// TODO: @ravig Break this function into smaller functions.
func evictPodsFromTargetNodes(
ctx context.Context,
targetNodes, lowNodes []NodeUsage,
podEvictor *evictions.PodEvictor,
podFilter func(pod *v1.Pod) bool,
) {
sortNodesByUsage(targetNodes)
// upper bound on total number of pods/cpu/memory to be moved
totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{
v1.ResourcePods: {},
v1.ResourceCPU: {},
v1.ResourceMemory: {},
}
var taintsOfLowNodes = make(map[string][]v1.Taint, len(lowNodes))
for _, node := range lowNodes {
taintsOfLowNodes[node.node.Name] = node.node.Spec.Taints
for name := range totalAvailableUsage {
totalAvailableUsage[name].Add(*node.highResourceThreshold[name])
totalAvailableUsage[name].Sub(*node.usage[name])
}
}
klog.V(1).InfoS(
"Total capacity to be moved",
"CPU", totalAvailableUsage[v1.ResourceCPU].MilliValue(),
"Mem", totalAvailableUsage[v1.ResourceMemory].Value(),
"Pods", totalAvailableUsage[v1.ResourcePods].Value(),
)
for _, node := range targetNodes {
klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)
nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
klog.V(2).InfoS("Pods on node", "node", klog.KObj(node.node), "allPods", len(node.allPods), "nonRemovablePods", len(nonRemovablePods), "removablePods", len(removablePods))
if len(removablePods) == 0 {
klog.V(1).InfoS("No removable pods on node, try next node", "node", klog.KObj(node.node))
continue
}
klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
evictPods(ctx, removablePods, node, totalAvailableUsage, taintsOfLowNodes, podEvictor)
klog.V(1).InfoS("Evicted pods from node", "node", klog.KObj(node.node), "evictedPods", podEvictor.NodeEvicted(node.node), "usage", node.usage)
}
}
func evictPods(
ctx context.Context,
inputPods []*v1.Pod,
nodeUsage NodeUsage,
totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
taintsOfLowNodes map[string][]v1.Taint,
podEvictor *evictions.PodEvictor,
) {
// stop if node utilization drops below the target threshold or if any of the required capacities (cpu, memory, pods) has been fully moved
continueCond := func() bool {
if !isNodeAboveTargetUtilization(nodeUsage) {
return false
}
if totalAvailableUsage[v1.ResourcePods].CmpInt64(0) < 1 {
return false
}
if totalAvailableUsage[v1.ResourceCPU].CmpInt64(0) < 1 {
return false
}
if totalAvailableUsage[v1.ResourceMemory].CmpInt64(0) < 1 {
return false
}
return true
}
if continueCond() {
for _, pod := range inputPods {
if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod))
continue
}
success, err := podEvictor.EvictPod(ctx, pod, nodeUsage.node, "LowNodeUtilization")
if err != nil {
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
break
}
if success {
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod), "err", err)
cpuQuantity := utils.GetResourceRequestQuantity(pod, v1.ResourceCPU)
nodeUsage.usage[v1.ResourceCPU].Sub(cpuQuantity)
totalAvailableUsage[v1.ResourceCPU].Sub(cpuQuantity)
memoryQuantity := utils.GetResourceRequestQuantity(pod, v1.ResourceMemory)
nodeUsage.usage[v1.ResourceMemory].Sub(memoryQuantity)
totalAvailableUsage[v1.ResourceMemory].Sub(memoryQuantity)
nodeUsage.usage[v1.ResourcePods].Sub(*resource.NewQuantity(1, resource.DecimalSI))
totalAvailableUsage[v1.ResourcePods].Sub(*resource.NewQuantity(1, resource.DecimalSI))
klog.V(3).InfoS("Updated node usage", "updatedUsage", nodeUsage)
// check if node utilization dropped below the target threshold or if any required capacity (cpu, memory, pods) has been fully moved
if !continueCond() {
break
}
}
}
}
}
// sortNodesByUsage sorts nodes based on usage in descending order
func sortNodesByUsage(nodes []NodeUsage) {
sort.Slice(nodes, func(i, j int) bool {
ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
tj := nodes[j].usage[v1.ResourceMemory].Value() + nodes[j].usage[v1.ResourceCPU].MilliValue() + nodes[j].usage[v1.ResourcePods].Value()
// To return sorted in descending order
return ti > tj
})
}
// isNodeAboveTargetUtilization checks if a node is overutilized
// At least one resource has to be above the high threshold
func isNodeAboveTargetUtilization(usage NodeUsage) bool {
for name, nodeValue := range usage.usage {
// usage.highResourceThreshold[name] < nodeValue
if usage.highResourceThreshold[name].Cmp(*nodeValue) == -1 {
return true
}
}
return false
}
// isNodeWithLowUtilization checks if a node is underutilized
// All resources have to be below the low threshold
func isNodeWithLowUtilization(usage NodeUsage) bool {
for name, nodeValue := range usage.usage {
// usage.lowResourceThreshold[name] < nodeValue
if usage.lowResourceThreshold[name].Cmp(*nodeValue) == -1 {
return false
}
}
return true
}
func nodeUtilization(node *v1.Node, pods []*v1.Pod) map[v1.ResourceName]*resource.Quantity {
totalReqs := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
}
for _, pod := range pods {
req, _ := utils.PodRequestsAndLimits(pod)
for name, quantity := range req {
if name == v1.ResourceCPU || name == v1.ResourceMemory {
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
totalReqs[name].Add(quantity)
}
}
}
return totalReqs
}
func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*v1.Pod) {
var nonRemovablePods, removablePods []*v1.Pod
for _, pod := range pods {
if !filter(pod) {
nonRemovablePods = append(nonRemovablePods, pod)
} else {
removablePods = append(removablePods, pod)
}
}
return nonRemovablePods, removablePods
}
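For reference, a hedged sketch (same package, hypothetical wrapper) of how evictPodsFromTargetNodes uses classifyPods with the evictor's IsEvictable check as the filter:
// splitPodsExample separates a node's pods into those that must stay and
// those that are candidates for eviction.
func splitPodsExample(node NodeUsage, isEvictable func(pod *v1.Pod) bool) (nonRemovable, removable []*v1.Pod) {
return classifyPods(node.allPods, isEvictable)
}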

View File

@@ -1,776 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"fmt"
"strings"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
var (
lowPriority = int32(0)
highPriority = int32(10000)
)
func TestLowNodeUtilization(t *testing.T) {
ctx := context.Background()
n1NodeName := "n1"
n2NodeName := "n2"
n3NodeName := "n3"
testCases := []struct {
name string
thresholds, targetThresholds api.ResourceThresholds
nodes map[string]*v1.Node
pods map[string]*v1.PodList
maxPodsToEvictPerNode int
expectedPodsEvicted int
evictedPods []string
}{
{
name: "no evictable pods",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {},
},
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 0,
},
{
name: "without priorities",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
*test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {},
},
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 4,
},
{
name: "without priorities stop when cpu capacity is depleted",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 300, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 300, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 300, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 300, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 300, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
*test.BuildTestPod("p6", 400, 300, n1NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p7", 400, 300, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
*test.BuildTestPod("p8", 400, 300, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p9", 400, 2100, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {},
},
maxPodsToEvictPerNode: 0,
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before cpu is depleted
expectedPodsEvicted: 3,
},
{
name: "with priorities",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, highPriority)
}),
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, highPriority)
}),
*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, highPriority)
}),
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, highPriority)
}),
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, lowPriority)
}),
// These won't be evicted.
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
test.SetPodPriority(pod, highPriority)
}),
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
test.SetPodPriority(pod, lowPriority)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {},
},
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 4,
},
{
name: "without priorities evicting best-effort pods only",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
// All pods are assumed to be burstable (test.BuildTestPod always sets cpu/memory resource requests to some value)
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
// These won't be evicted.
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
}),
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {},
},
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 4,
evictedPods: []string{"p1", "p2", "p4", "p5"},
},
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, n1NodeName) {
return true, test.pods[n1NodeName], nil
}
if strings.Contains(fieldString, n2NodeName) {
return true, test.pods[n2NodeName], nil
}
if strings.Contains(fieldString, n3NodeName) {
return true, test.pods[n3NodeName], nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
if node, exists := test.nodes[getAction.GetName()]; exists {
return true, node, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
podsForEviction := make(map[string]struct{})
for _, pod := range test.evictedPods {
podsForEviction[pod] = struct{}{}
}
evictionFailed := false
if len(test.evictedPods) > 0 {
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.CreateAction)
obj := getAction.GetObject()
if eviction, ok := obj.(*v1beta1.Eviction); ok {
if _, exists := podsForEviction[eviction.Name]; exists {
return true, obj, nil
}
evictionFailed = true
return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
}
return true, obj, nil
})
}
var nodes []*v1.Node
for _, node := range test.nodes {
nodes = append(nodes, node)
}
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
test.maxPodsToEvictPerNode,
nodes,
false,
)
strategy := api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
Thresholds: test.thresholds,
TargetThresholds: test.targetThresholds,
},
},
}
LowNodeUtilization(ctx, fakeClient, strategy, nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if test.expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
}
})
}
}
func TestValidateStrategyConfig(t *testing.T) {
tests := []struct {
name string
thresholds api.ResourceThresholds
targetThresholds api.ResourceThresholds
errInfo error
}{
{
name: "passing invalid thresholds",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 120,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
},
{
name: "passing invalid targetThresholds",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
"resourceInvalid": 80,
},
errInfo: fmt.Errorf("targetThresholds config is not valid: %v",
fmt.Errorf("only cpu, memory, or pods thresholds can be specified")),
},
{
name: "thresholds and targetThresholds configured different num of resources",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
v1.ResourcePods: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds and targetThresholds configured different resources",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourcePods: 80,
},
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
},
{
name: "thresholds' CPU config value is greater than targetThresholds'",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 90,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
},
{
name: "passing valid strategy config",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
},
errInfo: nil,
},
}
for _, testCase := range tests {
validateErr := validateStrategyConfig(testCase.thresholds, testCase.targetThresholds)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
}
}
}
func TestValidateThresholds(t *testing.T) {
tests := []struct {
name string
input api.ResourceThresholds
errInfo error
}{
{
name: "passing nil map for threshold",
input: nil,
errInfo: fmt.Errorf("no resource threshold is configured"),
},
{
name: "passing no threshold",
input: api.ResourceThresholds{},
errInfo: fmt.Errorf("no resource threshold is configured"),
},
{
name: "passing unsupported resource name",
input: api.ResourceThresholds{
v1.ResourceCPU: 40,
v1.ResourceStorage: 25.5,
},
errInfo: fmt.Errorf("only cpu, memory, or pods thresholds can be specified"),
},
{
name: "passing invalid resource name",
input: api.ResourceThresholds{
v1.ResourceCPU: 40,
"coolResource": 42.0,
},
errInfo: fmt.Errorf("only cpu, memory, or pods thresholds can be specified"),
},
{
name: "passing invalid resource value",
input: api.ResourceThresholds{
v1.ResourceCPU: 110,
v1.ResourceMemory: 80,
},
errInfo: fmt.Errorf("%v threshold not in [%v, %v] range", v1.ResourceCPU, MinResourcePercentage, MaxResourcePercentage),
},
{
name: "passing a valid threshold with max and min resource value",
input: api.ResourceThresholds{
v1.ResourceCPU: 100,
v1.ResourceMemory: 0,
},
errInfo: nil,
},
{
name: "passing a valid threshold with only cpu",
input: api.ResourceThresholds{
v1.ResourceCPU: 80,
},
errInfo: nil,
},
{
name: "passing a valid threshold with cpu, memory and pods",
input: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 30,
v1.ResourcePods: 40,
},
errInfo: nil,
},
}
for _, test := range tests {
validateErr := validateThresholds(test.input)
if validateErr == nil || test.errInfo == nil {
if validateErr != test.errInfo {
t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
}
} else if validateErr.Error() != test.errInfo.Error() {
t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
}
}
}
func TestWithTaints(t *testing.T) {
ctx := context.Background()
strategy := api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
Thresholds: api.ResourceThresholds{
v1.ResourcePods: 20,
},
TargetThresholds: api.ResourceThresholds{
v1.ResourcePods: 70,
},
},
},
}
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 1000, 3000, 10, nil)
n3 := test.BuildTestNode("n3", 1000, 3000, 10, nil)
n3withTaints := n3.DeepCopy()
n3withTaints.Spec.Taints = []v1.Taint{
{
Key: "key",
Value: "value",
Effect: v1.TaintEffectNoSchedule,
},
}
podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, test.SetRSOwnerRef)
podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
{
Key: "key",
Value: "value",
},
}
tests := []struct {
name string
nodes []*v1.Node
pods []*v1.Pod
evictionsExpected int
}{
{
name: "No taints",
nodes: []*v1.Node{n1, n2, n3},
pods: []*v1.Pod{
//Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
// Node 2 pods
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, test.SetRSOwnerRef),
},
evictionsExpected: 1,
},
{
name: "No pod tolerates node taint",
nodes: []*v1.Node{n1, n3withTaints},
pods: []*v1.Pod{
//Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
// Node 3 pods
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
},
evictionsExpected: 0,
},
{
name: "Pod which tolerates node taint",
nodes: []*v1.Node{n1, n3withTaints},
pods: []*v1.Pod{
//Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
podThatToleratesTaint,
// Node 3 pods
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
},
evictionsExpected: 1,
},
}
for _, item := range tests {
t.Run(item.name, func(t *testing.T) {
var objs []runtime.Object
for _, node := range item.nodes {
objs = append(objs, node)
}
for _, pod := range item.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
podEvictor := evictions.NewPodEvictor(
fakeClient,
"policy/v1",
false,
item.evictionsExpected,
item.nodes,
false,
)
LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor)
if item.evictionsExpected != podEvictor.TotalEvicted() {
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
}
})
}
}
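
For orientation, the validation rules exercised by the TestValidateStrategyConfig and TestValidateThresholds cases above can be condensed into the rough sketch below. This is illustrative only: the shipped validateThresholds and validateStrategyConfig in the nodeutilization strategy may be organized differently, and the local minPct/maxPct constants merely stand in for MinResourcePercentage and MaxResourcePercentage.

package sketch // illustrative only, not descheduler source

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "sigs.k8s.io/descheduler/pkg/api"
)

const (
    minPct = 0   // stands in for MinResourcePercentage
    maxPct = 100 // stands in for MaxResourcePercentage
)

// sketchValidateThresholds mirrors the table above: only cpu, memory and pods are
// accepted as resource names, and every percentage must fall within [0, 100].
func sketchValidateThresholds(thresholds api.ResourceThresholds) error {
    if len(thresholds) == 0 {
        return fmt.Errorf("no resource threshold is configured")
    }
    for name, pct := range thresholds {
        switch name {
        case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
        default:
            return fmt.Errorf("only cpu, memory, or pods thresholds can be specified")
        }
        if pct < minPct || pct > maxPct {
            return fmt.Errorf("%v threshold not in [%v, %v] range", name, minPct, maxPct)
        }
    }
    return nil
}

// sketchValidateStrategyConfig mirrors TestValidateStrategyConfig: both maps must be
// individually valid, cover the same resources, and no threshold may exceed its
// corresponding targetThreshold.
func sketchValidateStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
    if err := sketchValidateThresholds(thresholds); err != nil {
        return fmt.Errorf("thresholds config is not valid: %v", err)
    }
    if err := sketchValidateThresholds(targetThresholds); err != nil {
        return fmt.Errorf("targetThresholds config is not valid: %v", err)
    }
    if len(thresholds) != len(targetThresholds) {
        return fmt.Errorf("thresholds and targetThresholds configured different resources")
    }
    for name, pct := range thresholds {
        target, ok := targetThresholds[name]
        if !ok {
            return fmt.Errorf("thresholds and targetThresholds configured different resources")
        }
        if pct > target {
            return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", name)
        }
    }
    return nil
}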


@@ -1,108 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
if params == nil || len(params.NodeAffinityType) == 0 {
return fmt.Errorf("NodeAffinityType is empty")
}
// At most one of include/exclude can be set
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
}
return nil
}
// RemovePodsViolatingNodeAffinity evicts pods that violate their node affinity
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
return
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}
var includedNamespaces, excludedNamespaces []string
if strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
switch nodeAffinity {
case "requiredDuringSchedulingIgnoredDuringExecution":
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(
ctx,
client,
node,
podutil.WithFilter(func(pod *v1.Pod) bool {
return evictable.IsEvictable(pod) &&
!nodeutil.PodFitsCurrentNode(pod, node) &&
nodeutil.PodFitsAnyNode(pod, nodes)
}),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
)
if err != nil {
klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))
}
for _, pod := range pods {
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
if _, err := podEvictor.EvictPod(ctx, pod, node, "NodeAffinity"); err != nil {
klog.ErrorS(err, "Error evicting pod")
break
}
}
}
}
default:
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
}
}
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
}
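
As a reading aid, the per-pod condition the strategy above applies can be restated as a single predicate. This is a sketch only; the real code keeps the fit checks in the ListPodsOnANode filter and the affinity check in the loop body.

// Assumes the imports already used in the file above:
//   v1 "k8s.io/api/core/v1"
//   nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
//
// sketchViolatesRequiredNodeAffinity restates the condition used above: the pod
// declares requiredDuringSchedulingIgnoredDuringExecution node affinity, no longer
// fits the node it is running on, but would still fit some other candidate node.
func sketchViolatesRequiredNodeAffinity(pod *v1.Pod, node *v1.Node, nodes []*v1.Node) bool {
    hasRequiredNodeAffinity := pod.Spec.Affinity != nil &&
        pod.Spec.Affinity.NodeAffinity != nil &&
        pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil
    return hasRequiredNodeAffinity &&
        !nodeutil.PodFitsCurrentNode(pod, node) &&
        nodeutil.PodFitsAnyNode(pod, nodes)
}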


@@ -1,169 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/test"
)
func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
ctx := context.Background()
requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
NodeAffinityType: []string{
"requiredDuringSchedulingIgnoredDuringExecution",
},
},
}
nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10, nil)
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil)
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil)
unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
unschedulableNodeWithLabels.Spec.Unschedulable = true
addPodsToNode := func(node *v1.Node) []v1.Pod {
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
},
}
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name, nil)
podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
return []v1.Pod{
*podWithNodeAffinity,
*pod1,
*pod2,
}
}
tests := []struct {
description string
nodes []*v1.Node
pods []v1.Pod
strategy api.DeschedulerStrategy
expectedEvictedPodCount int
maxPodsToEvictPerNode int
}{
{
description: "Invalid strategy type, should not evict any pods",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
NodeAffinityType: []string{
"requiredDuringSchedulingRequiredDuringExecution",
},
},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: 0,
},
{
description: "Pod is correctly scheduled on node, no eviction expected",
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels),
nodes: []*v1.Node{nodeWithLabels},
maxPodsToEvictPerNode: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: 1,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: 0,
},
}
for _, tc := range tests {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
false,
)
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
}
}


@@ -1,99 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"fmt"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters) error {
if params == nil {
return nil
}
// At most one of include/exclude can be set
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
}
return nil
}
// RemovePodsViolatingNodeTaints evicts pods that do not tolerate NoSchedule taints on their nodes
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
return
}
var includedNamespaces, excludedNamespaces []string
if strategy.Params != nil && strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(
ctx,
client,
node,
podutil.WithFilter(evictable.IsEvictable),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
)
if err != nil {
//no pods evicted as error encountered retrieving evictable Pods
return
}
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if !utils.TolerationsTolerateTaintsWithFilter(
pods[i].Spec.Tolerations,
node.Spec.Taints,
func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
) {
klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
klog.ErrorS(err, "Error evicting pod")
break
}
}
}
}
}
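
The eviction trigger above can likewise be read as a single predicate (a sketch that reuses the v1 and utils imports from the file above; the strategy keeps this inline):

// sketchViolatesNoScheduleTaints reports whether a pod fails to tolerate at least
// one NoSchedule taint on its node, which is the condition under which the
// strategy above evicts it.
func sketchViolatesNoScheduleTaints(pod *v1.Pod, node *v1.Node) bool {
    return !utils.TolerationsTolerateTaintsWithFilter(
        pod.Spec.Tolerations,
        node.Spec.Taints,
        func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
    )
}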


@@ -1,285 +0,0 @@
package strategies
import (
"context"
"fmt"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
func createNoScheduleTaint(key, value string, index int) v1.Taint {
return v1.Taint{
Key: fmt.Sprintf("%s%v", key, index),
Value: fmt.Sprintf("%s%v", value, index),
Effect: v1.TaintEffectNoSchedule,
}
}
func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
taints := []v1.Taint{}
for _, index := range indices {
taints = append(taints, createNoScheduleTaint(key, value, index))
}
node.Spec.Taints = taints
return node
}
func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
if pod.Annotations == nil {
pod.Annotations = map[string]string{}
}
pod.Spec.Tolerations = []v1.Toleration{{Key: key + fmt.Sprintf("%v", index), Value: value + fmt.Sprintf("%v", index), Effect: v1.TaintEffectNoSchedule}}
return pod
}
func TestDeletePodsViolatingNodeTaints(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
node2 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7 := test.BuildTestPod("p7", 100, 0, node2.Name, nil)
p8 := test.BuildTestPod("p8", 100, 0, node2.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node2.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node2.Name, nil)
p11 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// The following 4 pods won't get evicted.
// A Critical Pod.
p7.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p7.Spec.Priority = &priority
// A daemonset.
p8.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p9.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p9.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p10.Annotations = test.GetMirrorPodAnnotation()
p1 = addTolerationToPod(p1, "testTaint", "test", 1)
p3 = addTolerationToPod(p3, "testTaint", "test", 1)
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1)
tests := []struct {
description string
nodes []*v1.Node
pods []v1.Pod
evictLocalStoragePods bool
maxPodsToEvictPerNode int
expectedEvictedPodCount int
}{
{
description: "Pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p2 gets evicted
},
{
description: "Pods with tolerations but not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p3, *p4},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p4 gets evicted
},
{
description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p5, *p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
maxPodsToEvictPerNode: 1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Critical pods not tolerating node taint should not be evicted",
pods: []v1.Pod{*p7, *p8, *p9, *p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0,
},
{
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
pods: []v1.Pod{*p7, *p8, *p9, *p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: true,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1,
},
{
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p7, *p8, *p10, *p11},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1,
},
}
for _, tc := range tests {
// create fake client
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
tc.evictLocalStoragePods,
)
RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
}
}
func TestToleratesTaint(t *testing.T) {
testCases := []struct {
description string
toleration v1.Toleration
taint v1.Taint
expectTolerated bool
}{
{
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has no value, expect tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has some value, expect tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same effect, toleration has empty key and operator is Exists, means match all taints, expect tolerated",
toleration: v1.Toleration{
Key: "",
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same key, effect and value, and operator is Equal, expect tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpEqual,
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same key and effect, but different values, and operator is Equal, expect not tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpEqual,
Value: "value1",
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "value2",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: false,
},
{
description: "toleration and taint have the same key and value, but different effects, and operator is Equal, expect not tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpEqual,
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoExecute,
},
expectTolerated: false,
},
}
for _, tc := range testCases {
if tolerated := tc.toleration.ToleratesTaint(&tc.taint); tc.expectTolerated != tolerated {
t.Errorf("[%s] expect %v, got %v: toleration %+v, taint %s", tc.description, tc.expectTolerated, tolerated, tc.toleration, tc.taint.ToString())
}
}
}


@@ -1,152 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
func TestPodAntiAffinity(t *testing.T) {
ctx := context.Background()
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
criticalPriority := utils.SystemCriticalPriority
nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node.Name, func(pod *v1.Pod) {
pod.Spec.Priority = &criticalPriority
})
p2.Labels = map[string]string{"foo": "bar"}
p5.Labels = map[string]string{"foo": "bar"}
p6.Labels = map[string]string{"foo": "bar"}
p7.Labels = map[string]string{"foo1": "bar1"}
nonEvictablePod.Labels = map[string]string{"foo": "bar"}
test.SetNormalOwnerRef(p1)
test.SetNormalOwnerRef(p2)
test.SetNormalOwnerRef(p3)
test.SetNormalOwnerRef(p4)
test.SetNormalOwnerRef(p5)
test.SetNormalOwnerRef(p6)
test.SetNormalOwnerRef(p7)
// set pod anti affinity
setPodAntiAffinity(p1, "foo", "bar")
setPodAntiAffinity(p3, "foo", "bar")
setPodAntiAffinity(p4, "foo", "bar")
setPodAntiAffinity(p5, "foo1", "bar1")
setPodAntiAffinity(p6, "foo1", "bar1")
setPodAntiAffinity(p7, "foo", "bar")
// set pod priority
test.SetPodPriority(p5, 100)
test.SetPodPriority(p6, 50)
test.SetPodPriority(p7, 0)
tests := []struct {
description string
maxPodsToEvictPerNode int
pods []v1.Pod
expectedEvictedPodCount int
}{
{
description: "Maximum pods to evict - 0",
maxPodsToEvictPerNode: 0,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: 3,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
expectedEvictedPodCount: 3,
},
{
description: "Evict only 1 pod after sorting",
maxPodsToEvictPerNode: 0,
pods: []v1.Pod{*p5, *p6, *p7},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: 1,
pods: []v1.Pod{*p1, *nonEvictablePod},
expectedEvictedPodCount: 1,
},
}
for _, test := range tests {
// create fake client
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: test.pods}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
test.maxPodsToEvictPerNode,
[]*v1.Node{node},
false,
)
RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != test.expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
}
}
}
func setPodAntiAffinity(inputPod *v1.Pod, labelKey, labelValue string) {
inputPod.Spec.Affinity = &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: labelKey,
Operator: metav1.LabelSelectorOpIn,
Values: []string{labelValue},
},
},
},
TopologyKey: "region",
},
},
},
}
}


@@ -1,132 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
func validatePodLifeTimeParams(params *api.StrategyParameters) error {
if params == nil || params.PodLifeTime == nil || params.PodLifeTime.MaxPodLifeTimeSeconds == nil {
return fmt.Errorf("MaxPodLifeTimeSeconds not set")
}
if params.PodLifeTime.PodStatusPhases != nil {
for _, phase := range params.PodLifeTime.PodStatusPhases {
if phase != string(v1.PodPending) && phase != string(v1.PodRunning) {
return fmt.Errorf("only Pending and Running phases are supported in PodLifeTime")
}
}
}
// At most one of include/exclude can be set
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
}
return nil
}
// PodLifeTime evicts pods that were created more than strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds seconds ago.
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validatePodLifeTimeParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid PodLifeTime parameters")
return
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}
var includedNamespaces, excludedNamespaces []string
if strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
filter := evictable.IsEvictable
if strategy.Params.PodLifeTime.PodStatusPhases != nil {
filter = func(pod *v1.Pod) bool {
for _, phase := range strategy.Params.PodLifeTime.PodStatusPhases {
if string(pod.Status.Phase) == phase {
return evictable.IsEvictable(pod)
}
}
return false
}
}
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
for _, pod := range pods {
success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
if success {
klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
}
if err != nil {
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
break
}
}
}
}
func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, includedNamespaces, excludedNamespaces []string, maxPodLifeTimeSeconds uint, filter func(pod *v1.Pod) bool) []*v1.Pod {
pods, err := podutil.ListPodsOnANode(
ctx,
client,
node,
podutil.WithFilter(filter),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
)
if err != nil {
return nil
}
var oldPods []*v1.Pod
for _, pod := range pods {
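// Age in whole seconds since the pod's creation timestamp; the uint conversion
// assumes the creation timestamp is not in the future.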
podAgeSeconds := uint(v1meta.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
if podAgeSeconds > maxPodLifeTimeSeconds {
oldPods = append(oldPods, pod)
}
}
return oldPods
}
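
One detail worth noting in listOldPodsOnNode above: converting a negative float to uint is implementation-specific in Go, so a creation timestamp that lies in the future (for example due to clock skew) yields an unpredictable age. A guarded variant could look like the sketch below; it is not the shipped code and assumes the v1 import from the file above plus "time".

// sketchPodAgeSeconds returns the pod age in seconds, clamping timestamps that lie
// in the future to zero instead of converting a negative value to uint.
func sketchPodAgeSeconds(pod *v1.Pod) uint {
    age := time.Since(pod.GetCreationTimestamp().Time)
    if age < 0 {
        return 0
    }
    return uint(age.Seconds())
}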


@@ -1,198 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/test"
)
func TestPodLifeTime(t *testing.T) {
ctx := context.Background()
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
newerPodCreationTime := metav1.NewTime(time.Now())
// Setup pods, one should be evicted
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
p1.Namespace = "dev"
p1.ObjectMeta.CreationTimestamp = newerPodCreationTime
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
p2.Namespace = "dev"
p2.ObjectMeta.CreationTimestamp = olderPodCreationTime
ownerRef1 := test.GetReplicaSetOwnerRefList()
p1.ObjectMeta.OwnerReferences = ownerRef1
p2.ObjectMeta.OwnerReferences = ownerRef1
// Setup pods, zero should be evicted
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
p3.Namespace = "dev"
p3.ObjectMeta.CreationTimestamp = newerPodCreationTime
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
p4.Namespace = "dev"
p4.ObjectMeta.CreationTimestamp = newerPodCreationTime
ownerRef2 := test.GetReplicaSetOwnerRefList()
p3.ObjectMeta.OwnerReferences = ownerRef2
p4.ObjectMeta.OwnerReferences = ownerRef2
// Setup pods, one should be evicted
p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
p5.Namespace = "dev"
p5.ObjectMeta.CreationTimestamp = newerPodCreationTime
p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
p6.Namespace = "dev"
p6.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(-time.Second * 605))
ownerRef3 := test.GetReplicaSetOwnerRefList()
p5.ObjectMeta.OwnerReferences = ownerRef3
p6.ObjectMeta.OwnerReferences = ownerRef3
// Setup pods, zero should be evicted
p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
p7.Namespace = "dev"
p7.ObjectMeta.CreationTimestamp = newerPodCreationTime
p8 := test.BuildTestPod("p8", 100, 0, node.Name, nil)
p8.Namespace = "dev"
p8.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(-time.Second * 595))
ownerRef4 := test.GetReplicaSetOwnerRefList()
p7.ObjectMeta.OwnerReferences = ownerRef4
p8.ObjectMeta.OwnerReferences = ownerRef4
// Setup two old pods with different status phases
p9 := test.BuildTestPod("p9", 100, 0, node.Name, nil)
p9.Namespace = "dev"
p9.ObjectMeta.CreationTimestamp = olderPodCreationTime
p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
p10.Namespace = "dev"
p10.ObjectMeta.CreationTimestamp = olderPodCreationTime
p9.Status.Phase = "Pending"
p10.Status.Phase = "Running"
p9.ObjectMeta.OwnerReferences = ownerRef1
p10.ObjectMeta.OwnerReferences = ownerRef1
var maxLifeTime uint = 600
testCases := []struct {
description string
strategy api.DeschedulerStrategy
maxPodsToEvictPerNode int
pods []v1.Pod
expectedEvictedPodCount int
}{
{
description: "Two pods in the `dev` Namespace, 1 is new and 1 is very old. 1 should be evicted.",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2},
expectedEvictedPodCount: 1,
},
{
description: "Two pods in the `dev` Namespace, 2 are new and 0 are old. 0 should be evicted.",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p3, *p4},
expectedEvictedPodCount: 0,
},
{
description: "Two pods in the `dev` Namespace, 1 created 605 seconds ago. 1 should be evicted.",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p5, *p6},
expectedEvictedPodCount: 1,
},
{
description: "Two pods in the `dev` Namespace, 1 created 595 seconds ago. 0 should be evicted.",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p7, *p8},
expectedEvictedPodCount: 0,
},
{
description: "Two old pods with different status phases. 1 should be evicted.",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
PodLifeTime: &api.PodLifeTime{
MaxPodLifeTimeSeconds: &maxLifeTime,
PodStatusPhases: []string{"Pending"},
},
},
},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p9, *p10},
expectedEvictedPodCount: 1,
},
}
for _, tc := range testCases {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
tc.maxPodsToEvictPerNode,
[]*v1.Node{node},
false,
)
PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
}
}
}


@@ -1,117 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
if params == nil || params.PodsHavingTooManyRestarts == nil || params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
return fmt.Errorf("PodsHavingTooManyRestarts threshold not set")
}
// At most one of include/exclude can be set
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
}
return nil
}
// RemovePodsHavingTooManyRestarts evicts pods that have restarted too many times on a node.
// Many situations can lead to this: failed volume mounts, or application errors caused by node-specific settings.
// As of now, this strategy won't evict daemonset pods, mirror pods, critical pods, or pods with local storage.
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
return
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}
var includedNamespaces, excludedNamespaces []string
if strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(
ctx,
client,
node,
podutil.WithFilter(evictable.IsEvictable),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
)
if err != nil {
klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
continue
}
for i, pod := range pods {
restarts, initRestarts := calcContainerRestarts(pod)
if strategy.Params.PodsHavingTooManyRestarts.IncludingInitContainers {
if restarts+initRestarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
continue
}
} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
continue
}
if _, err := podEvictor.EvictPod(ctx, pods[i], node, "TooManyRestarts"); err != nil {
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
break
}
}
}
}
// calcContainerRestarts returns the total container restarts and init container restarts for a pod.
func calcContainerRestarts(pod *v1.Pod) (int32, int32) {
var restarts, initRestarts int32
for _, cs := range pod.Status.ContainerStatuses {
restarts += cs.RestartCount
}
for _, cs := range pod.Status.InitContainerStatuses {
initRestarts += cs.RestartCount
}
return restarts, initRestarts
}
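
The inline threshold check in the eviction loop above can be read as the following predicate (a sketch that reuses calcContainerRestarts and the v1 import from the file above; the strategy itself keeps this inline):

// sketchExceedsRestartThreshold reports whether a pod's accumulated restarts
// (optionally including init containers) have reached the configured threshold.
func sketchExceedsRestartThreshold(pod *v1.Pod, threshold int32, includeInitContainers bool) bool {
    restarts, initRestarts := calcContainerRestarts(pod)
    if includeInitContainers {
        return restarts+initRestarts >= threshold
    }
    return restarts >= threshold
}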


@@ -1,184 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategies
import (
"context"
"testing"
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/test"
)
func initPods(node *v1.Node) []v1.Pod {
pods := make([]v1.Pod, 0)
for i := int32(0); i <= 9; i++ {
pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil)
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// pod at index i will have 5*i init container restarts and 20*i container restarts (25*i in total).
pod.Status = v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 5 * i,
},
},
ContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 10 * i,
},
{
RestartCount: 10 * i,
},
},
}
pods = append(pods, *pod)
}
// The following 3 pods won't get evicted.
// A daemonset.
pods[6].ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
pods[7].ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pods[7].Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pods[8].Annotations = test.GetMirrorPodAnnotation()
return pods
}
func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
ctx := context.Background()
createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32) api.DeschedulerStrategy {
return api.DeschedulerStrategy{
Enabled: enabled,
Params: &api.StrategyParameters{
PodsHavingTooManyRestarts: &api.PodsHavingTooManyRestarts{
PodRestartThreshold: restartThresholds,
IncludingInitContainers: includingInitContainers,
},
},
}
}
tests := []struct {
description string
pods []v1.Pod
strategy api.DeschedulerStrategy
expectedEvictedPodCount int
maxPodsToEvictPerNode int
}{
{
description: "All pods have total restarts under threshold, no pod evictions",
strategy: createStrategy(true, true, 10000),
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: 0,
},
{
description: "Some pods have total restarts bigger than threshold",
strategy: createStrategy(true, true, 1),
expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods have total restarts at or above the threshold (includingInitContainers=true), 6 pod evictions",
strategy: createStrategy(true, true, 1*25),
expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
},
{
description: "Eight pods have total restarts at or above the threshold (includingInitContainers=false), 5 pod evictions",
strategy: createStrategy(true, false, 1*25),
expectedEvictedPodCount: 5,
maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods have total restarts at or above the threshold (includingInitContainers=true), 6 pod evictions",
strategy: createStrategy(true, true, 1*20),
expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods have total restarts at or above the threshold (includingInitContainers=false), 6 pod evictions",
strategy: createStrategy(true, false, 1*20),
expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
},
{
description: "Four pods have total restarts above the threshold (includingInitContainers=true), but only 1 pod eviction",
strategy: createStrategy(true, true, 5*25+1),
expectedEvictedPodCount: 1,
maxPodsToEvictPerNode: 0,
},
{
description: "Four pods have total restarts above the threshold (includingInitContainers=false), but only 1 pod eviction",
strategy: createStrategy(true, false, 5*20+1),
expectedEvictedPodCount: 1,
maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods reach the threshold but maxPodsToEvictPerNode=3, so only 3 pod evictions",
strategy: createStrategy(true, true, 1),
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: 3,
},
}
for _, tc := range tests {
node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
pods := initPods(node)
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
tc.maxPodsToEvictPerNode,
[]*v1.Node{node},
false,
)
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
}
}


@@ -1,410 +0,0 @@
package strategies
import (
"context"
"fmt"
"sigs.k8s.io/descheduler/pkg/api"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/test"
)
func TestTopologySpreadConstraint(t *testing.T) {
ctx := context.Background()
testCases := []struct {
name string
pods []*v1.Pod
expectedEvictedCount int
nodes []*v1.Node
strategy api.DeschedulerStrategy
namespaces []string
}{
{
name: "2 domains, sizes [2,1], maxSkew=1, move 0 pods",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
},
{
count: 1,
node: "n2",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
{
count: 2,
node: "n1",
labels: map[string]string{"foo": "bar"},
},
{
count: 1,
node: "n2",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2], exclude kube-system namespace",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
{
count: 2,
node: "n1",
labels: map[string]string{"foo": "bar"},
},
{
count: 1,
node: "n2",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{Enabled: true, Params: &api.StrategyParameters{Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}}},
namespaces: []string{"ns1"},
},
{
name: "2 domains, sizes [5,2], maxSkew=1, move 1 pod to achieve [4,3]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
{
count: 4,
node: "n1",
labels: map[string]string{"foo": "bar"},
},
{
count: 2,
node: "n2",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods to achieve [2,2]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
{
count: 3,
node: "n1",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 2,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "2 domains, sizes [4,0], maxSkew=1, only move 1 pod since pods with nodeSelector and nodeAffinity aren't evicted",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
nodeSelector: map[string]string{"zone": "zoneA"},
},
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
nodeSelector: map[string]string{"zone": "zoneA"},
},
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
nodeAffinity: &v1.Affinity{NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
{MatchExpressions: []v1.NodeSelectorRequirement{{Key: "foo", Values: []string{"bar"}, Operator: v1.NodeSelectorOpIn}}},
}},
}},
},
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n2",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
{
count: 100,
node: "n3",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 66,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "4 domains, sizes [0, 1, 3, 5], should move 3 to get [2, 2, 3, 2]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneD" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n2",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
{
count: 3,
node: "n3",
labels: map[string]string{"foo": "bar"},
},
{
count: 5,
node: "n4",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 3,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "2 domains size [2 6], maxSkew=2, should move 1 to get [3 5]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 2,
TopologyKey: "zone",
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
},
{
count: 6,
node: "n2",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 1,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
podList := make([]v1.Pod, 0, len(tc.pods))
for _, pod := range tc.pods {
podList = append(podList, *pod)
}
return true, &v1.PodList{Items: podList}, nil
})
fakeClient.Fake.AddReactor("list", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.NamespaceList{Items: []v1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "ns1", Namespace: "ns1"}}}}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
100,
tc.nodes,
false,
)
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
}
})
}
}
type testPodList struct {
count int
node string
labels map[string]string
constraints []v1.TopologySpreadConstraint
nodeSelector map[string]string
nodeAffinity *v1.Affinity
}
func createTestPods(testPods []testPodList) []*v1.Pod {
ownerRef1 := test.GetReplicaSetOwnerRefList()
pods := make([]*v1.Pod, 0)
podNum := 0
for _, tp := range testPods {
for i := 0; i < tp.count; i++ {
pods = append(pods,
test.BuildTestPod(fmt.Sprintf("pod-%d", podNum), 100, 0, tp.node, func(p *v1.Pod) {
p.Labels = tp.labels
p.Namespace = "ns1"
p.ObjectMeta.OwnerReferences = ownerRef1
p.Spec.TopologySpreadConstraints = tp.constraints
p.Spec.NodeSelector = tp.nodeSelector
p.Spec.Affinity = tp.nodeAffinity
}))
podNum++
}
}
return pods
}

@@ -0,0 +1,71 @@
package validation
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/utils"
)
// ValidatedStrategyParams contains validated common strategy parameters
type ValidatedStrategyParams struct {
ThresholdPriority int32
IncludedNamespaces sets.String
ExcludedNamespaces sets.String
LabelSelector labels.Selector
NodeFit bool
}
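// DefaultValidatedStrategyParams returns a ValidatedStrategyParams that only sets the
// system-critical priority as the eviction threshold, leaving all other fields empty.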
func DefaultValidatedStrategyParams() ValidatedStrategyParams {
return ValidatedStrategyParams{ThresholdPriority: utils.SystemCriticalPriority}
}
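// ValidateAndParseStrategyParams validates the common v1alpha1 strategy parameters and
// converts them into a ValidatedStrategyParams. Mutually exclusive fields (Include/Exclude
// namespaces, ThresholdPriority/ThresholdPriorityClassName) must not be set together;
// nil params yield the defaults.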
func ValidateAndParseStrategyParams(
ctx context.Context,
client clientset.Interface,
params *api.StrategyParameters,
) (*ValidatedStrategyParams, error) {
if params == nil {
defaultValidatedStrategyParams := DefaultValidatedStrategyParams()
return &defaultValidatedStrategyParams, nil
}
// At most one of include/exclude can be set
var includedNamespaces, excludedNamespaces sets.String
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return nil, fmt.Errorf("only one of ThresholdPriority and thresholdPriorityClassName can be set")
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
if err != nil {
return nil, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
}
if params.Namespaces != nil {
includedNamespaces = sets.NewString(params.Namespaces.Include...)
excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
}
var selector labels.Selector
if params.LabelSelector != nil {
selector, err = metav1.LabelSelectorAsSelector(params.LabelSelector)
if err != nil {
return nil, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
}
}
return &ValidatedStrategyParams{
ThresholdPriority: thresholdPriority,
IncludedNamespaces: includedNamespaces,
ExcludedNamespaces: excludedNamespaces,
LabelSelector: selector,
NodeFit: params.NodeFit,
}, nil
}

@@ -0,0 +1,79 @@
package validation
import (
"context"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/descheduler/pkg/api"
)
var (
thresholdPriority int32 = 1000
)
func TestValidStrategyParams(t *testing.T) {
ctx := context.Background()
fakeClient := &fake.Clientset{}
testCases := []struct {
name string
params *api.StrategyParameters
}{
{name: "validate nil params", params: nil},
{name: "validate empty params", params: &api.StrategyParameters{}},
{name: "validate params with NodeFit", params: &api.StrategyParameters{NodeFit: true}},
{name: "validate params with ThresholdPriority", params: &api.StrategyParameters{ThresholdPriority: &thresholdPriority}},
{name: "validate params with priorityClassName", params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority"}},
{name: "validate params with excluded namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Exclude: []string{"excluded-ns"}}}},
{name: "validate params with included namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}}}},
{name: "validate params with empty label selector", params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{}}},
}
for _, tc := range testCases {
tc := tc // capture the range variable; parallel subtests would otherwise all see the last case
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
if err != nil {
t.Errorf("strategy params should be valid but got err: %v", err.Error())
}
if params == nil {
t.Errorf("strategy params should return a strategyParams but got nil")
}
})
}
}
func TestInvalidStrategyParams(t *testing.T) {
ctx := context.Background()
fakeClient := &fake.Clientset{}
testCases := []struct {
name string
params *api.StrategyParameters
}{
{
name: "invalid params with both included and excluded namespaces nil params",
params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}, Exclude: []string{"exclude-ns"}}},
},
{
name: "invalid params with both threshold priority and priority class name",
params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority", ThresholdPriority: &thresholdPriority},
},
{
name: "invalid params with bad label selector",
params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"": "missing-label"}}},
},
}
for _, tc := range testCases {
tc := tc // capture the range variable; parallel subtests would otherwise all see the last case
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
if err == nil {
t.Errorf("strategy params should be invalid but did not get err")
}
if params != nil {
t.Errorf("strategy params should return a nil strategyParams but got %v", params)
}
})
}
}

@@ -0,0 +1,273 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package descheduler
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/validation"
"sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)
// Once all strategies are migrated, their arguments will be read directly from the
// configuration file and no wiring will be needed. The wiring below is kept so the
// descheduler can still consume the v1alpha1 configuration while strategies are being
// migrated to plugins.
var pluginsMap = map[string]func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl){
"RemovePodsViolatingNodeTaints": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
args := &componentconfig.RemovePodsViolatingNodeTaintsArgs{
Namespaces: params.Namespaces,
LabelSelector: params.LabelSelector,
IncludePreferNoSchedule: params.IncludePreferNoSchedule,
ExcludedTaints: params.ExcludedTaints,
}
if err := validation.ValidateRemovePodsViolatingNodeTaintsArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodetaints.PluginName)
return
}
pg, err := removepodsviolatingnodetaints.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removepodsviolatingnodetaints.PluginName)
return
}
status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodsviolatingnodetaints.PluginName)
}
},
"RemoveFailedPods": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
failedPodsParams := params.FailedPods
if failedPodsParams == nil {
failedPodsParams = &api.FailedPods{}
}
args := &componentconfig.RemoveFailedPodsArgs{
Namespaces: params.Namespaces,
LabelSelector: params.LabelSelector,
IncludingInitContainers: failedPodsParams.IncludingInitContainers,
MinPodLifetimeSeconds: failedPodsParams.MinPodLifetimeSeconds,
ExcludeOwnerKinds: failedPodsParams.ExcludeOwnerKinds,
Reasons: failedPodsParams.Reasons,
}
if err := validation.ValidateRemoveFailedPodsArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removefailedpods.PluginName)
return
}
pg, err := removefailedpods.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removefailedpods.PluginName)
return
}
status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removefailedpods.PluginName)
}
},
"RemovePodsViolatingNodeAffinity": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
args := &componentconfig.RemovePodsViolatingNodeAffinityArgs{
Namespaces: params.Namespaces,
LabelSelector: params.LabelSelector,
NodeAffinityType: params.NodeAffinityType,
}
if err := validation.ValidateRemovePodsViolatingNodeAffinityArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodeaffinity.PluginName)
return
}
pg, err := removepodsviolatingnodeaffinity.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removepodsviolatingnodeaffinity.PluginName)
return
}
status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodsviolatingnodeaffinity.PluginName)
}
},
"RemovePodsViolatingInterPodAntiAffinity": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
args := &componentconfig.RemovePodsViolatingInterPodAntiAffinityArgs{
Namespaces: params.Namespaces,
LabelSelector: params.LabelSelector,
}
if err := validation.ValidateRemovePodsViolatingInterPodAntiAffinityArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatinginterpodantiaffinity.PluginName)
return
}
pg, err := removepodsviolatinginterpodantiaffinity.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removepodsviolatinginterpodantiaffinity.PluginName)
return
}
status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodsviolatinginterpodantiaffinity.PluginName)
}
},
"RemovePodsHavingTooManyRestarts": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
tooManyRestartsParams := params.PodsHavingTooManyRestarts
if tooManyRestartsParams == nil {
tooManyRestartsParams = &api.PodsHavingTooManyRestarts{}
}
args := &componentconfig.RemovePodsHavingTooManyRestartsArgs{
Namespaces: params.Namespaces,
LabelSelector: params.LabelSelector,
PodRestartThreshold: tooManyRestartsParams.PodRestartThreshold,
IncludingInitContainers: tooManyRestartsParams.IncludingInitContainers,
}
if err := validation.ValidateRemovePodsHavingTooManyRestartsArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodshavingtoomanyrestarts.PluginName)
return
}
pg, err := removepodshavingtoomanyrestarts.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removepodshavingtoomanyrestarts.PluginName)
return
}
status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodshavingtoomanyrestarts.PluginName)
}
},
"PodLifeTime": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
podLifeTimeParams := params.PodLifeTime
if podLifeTimeParams == nil {
podLifeTimeParams = &api.PodLifeTime{}
}
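// Combine the deprecated podStatusPhases values with the newer states field so that
// either spelling in the v1alpha1 configuration is honored.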
var states []string
if podLifeTimeParams.PodStatusPhases != nil {
states = append(states, podLifeTimeParams.PodStatusPhases...)
}
if podLifeTimeParams.States != nil {
states = append(states, podLifeTimeParams.States...)
}
args := &componentconfig.PodLifeTimeArgs{
Namespaces: params.Namespaces,
LabelSelector: params.LabelSelector,
MaxPodLifeTimeSeconds: podLifeTimeParams.MaxPodLifeTimeSeconds,
States: states,
}
if err := validation.ValidatePodLifeTimeArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", podlifetime.PluginName)
return
}
pg, err := podlifetime.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", podlifetime.PluginName)
return
}
status := pg.(framework.DeschedulePlugin).Deschedule(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", podlifetime.PluginName)
}
},
"RemoveDuplicates": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
args := &componentconfig.RemoveDuplicatesArgs{
Namespaces: params.Namespaces,
}
if params.RemoveDuplicates != nil {
args.ExcludeOwnerKinds = params.RemoveDuplicates.ExcludeOwnerKinds
}
if err := validation.ValidateRemoveDuplicatesArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removeduplicates.PluginName)
return
}
pg, err := removeduplicates.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removeduplicates.PluginName)
return
}
status := pg.(framework.BalancePlugin).Balance(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removeduplicates.PluginName)
}
},
"RemovePodsViolatingTopologySpreadConstraint": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
args := &componentconfig.RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: params.Namespaces,
LabelSelector: params.LabelSelector,
IncludeSoftConstraints: params.IncludeSoftConstraints,
}
if err := validation.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
return
}
pg, err := removepodsviolatingtopologyspreadconstraint.New(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
return
}
status := pg.(framework.BalancePlugin).Balance(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
}
},
"HighNodeUtilization": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
args := &componentconfig.HighNodeUtilizationArgs{
Thresholds: params.NodeResourceUtilizationThresholds.Thresholds,
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
}
if err := validation.ValidateHighNodeUtilizationArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
return
}
pg, err := nodeutilization.NewHighNodeUtilization(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
return
}
status := pg.(framework.BalancePlugin).Balance(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
}
},
"LowNodeUtilization": func(ctx context.Context, nodes []*v1.Node, params *api.StrategyParameters, handle *handleImpl) {
args := &componentconfig.LowNodeUtilizationArgs{
Thresholds: params.NodeResourceUtilizationThresholds.Thresholds,
TargetThresholds: params.NodeResourceUtilizationThresholds.TargetThresholds,
UseDeviationThresholds: params.NodeResourceUtilizationThresholds.UseDeviationThresholds,
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
}
if err := validation.ValidateLowNodeUtilizationArgs(args); err != nil {
klog.V(1).ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
return
}
pg, err := nodeutilization.NewLowNodeUtilization(args, handle)
if err != nil {
klog.V(1).ErrorS(err, "unable to initialize a plugin", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
return
}
status := pg.(framework.BalancePlugin).Balance(ctx, nodes)
if status != nil && status.Err != nil {
klog.V(1).ErrorS(err, "plugin finished with error", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
}
},
}
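For context, the map above is keyed by the v1alpha1 strategy name. A minimal sketch of how such a map could be driven from a loaded policy follows; the policy and handle variables, and the loop itself, are assumptions for illustration and are not part of this diff.

// Sketch only: run the wiring above for every enabled v1alpha1 strategy.
// "policy" (*api.DeschedulerPolicy) and "handle" (*handleImpl) are assumed to have
// been constructed by the caller; they are not defined in this change.
for name, strategy := range policy.Strategies {
if !strategy.Enabled {
continue
}
runFn, ok := pluginsMap[string(name)]
if !ok {
klog.V(1).InfoS("skipping unknown strategy", "strategy", name)
continue
}
runFn(ctx, nodes, strategy.Params, handle)
}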

Some files were not shown because too many files have changed in this diff.