mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Compare commits


128 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
8d37557743 Merge pull request #709 from damemi/update-helm-23
Update helm chart version to v0.23
2022-02-03 12:10:58 -08:00
Mike Dame
5081ad84b5 Update helm chart version to v0.23 2022-02-03 14:57:18 -05:00
Kubernetes Prow Robot
afb1d75ce1 Merge pull request #660 from martin-magakian/features/add_affinity_option
Adding 'affinity' support to run 'descheduler' in CronJob or Deployment
2022-01-27 05:56:27 -08:00
Kubernetes Prow Robot
8e3ef9a6b3 Merge pull request #694 from sharkannon/master
Updates to include annotations to the service account
2022-01-27 05:42:26 -08:00
Kubernetes Prow Robot
778a18c550 Merge pull request #700 from jklaw90/root-ctx
Use the root context cancellation
2022-01-27 05:08:25 -08:00
Julian Lawrence
1a98a566b3 adding cancelation from sigint sigterm 2022-01-25 00:10:09 -08:00
Kubernetes Prow Robot
a643c619c9 Merge pull request #699 from ingvagabund/evict-pods-report-metrics-indendent-of-the-dry-mode
Evictor: report successful eviction independently of the dry-mode
2022-01-20 14:16:29 -08:00
Jan Chaloupka
203388ff1a Evictor: report successful eviction independently of the dry-mode
Dry mode currently does not report metrics when the eviction succeeds
2022-01-20 21:23:19 +01:00
Kubernetes Prow Robot
2844f80a35 Merge pull request #677 from ingvagabund/accumulated-eviction
Use a fake client when evicting pods by individual strategies to accumulate the evictions
2022-01-20 08:15:52 -08:00
Jan Chaloupka
901a16ecbc Do not collect the metrics when the metrics server is not enabled 2022-01-20 17:04:15 +01:00
Jan Chaloupka
e0f086ff85 Use a fake client when evicting pods by individual strategies to accumulate the evictions
Currently, when the descheduler runs with --dry-run enabled, no strategy actually
evicts a pod, so every strategy always starts from the complete list of
pods. E.g. when the PodLifeTime strategy evicts a few pods, the RemoveDuplicatePods
strategy still takes into account even the pods already eliminated by the PodLifeTime
strategy. This does not correspond to the real-world scenario, where the
same pod cannot be evicted multiple times. Instead, use a fake client and
evict/delete the pods from its cache so the strategies evict each pod
at most once, as would normally happen in a real cluster.
2022-01-20 17:04:05 +01:00
Stephen Herd
8752a28025 Merge branch 'kubernetes-sigs-master' 2022-01-13 12:52:36 -08:00
Stephen Herd
24884c7568 Rebase from master 2022-01-13 12:52:06 -08:00
Kubernetes Prow Robot
175f648045 Merge pull request #695 from a7i/liveness-template
make livenessprobe consistent across manifests
2022-01-12 13:37:40 -08:00
Amir Alavi
f50a3fa119 make livenessprobe consistent across manifests; make helm chart configurable via values.yaml 2022-01-12 11:49:17 -05:00
Kubernetes Prow Robot
551eced42a Merge pull request #688 from babygoat/evict-failed-without-ownerrefs
feat: support eviction of failed bare pods
2022-01-11 12:31:15 -08:00
Stephen Herd
3635a8171c Updates to include annotations to the service account, needed for things such as Workload Identity in Google Cloud 2022-01-11 11:55:05 -08:00
Kubernetes Prow Robot
796f347305 Merge pull request #692 from jklaw90/sliding-until
NonSlidingUntil for deployment
2022-01-11 06:21:16 -08:00
Kubernetes Prow Robot
13abbe7f09 Merge pull request #693 from developer-guy/patch-1
Update NOTES.txt
2022-01-10 05:11:13 -08:00
Kubernetes Prow Robot
e4df54d2d1 Merge pull request #685 from JaneLiuL/master
add liveness probe
2022-01-10 04:29:12 -08:00
Jane Liu L
c38f617e40 add liveness probe 2022-01-10 09:56:53 +08:00
Kubernetes Prow Robot
e6551564c4 Merge pull request #691 from RyanDevlin/waitForNodes
Eliminated race condition in E2E tests
2022-01-07 06:16:30 -08:00
Batuhan Apaydın
3a991dd50c Update NOTES.txt
Signed-off-by: Batuhan Apaydın <batuhan.apaydin@trendyol.com>
Co-authored-by: Furkan Türkal <furkan.turkal@trendyol.com>
Co-authored-by: Emin Aktaş <emin.aktas@trendyol.com>
Co-authored-by: Necatican Yıldırım <necatican.yildirim@trendyol.com>
Co-authored-by: Fatih Sarhan <fatih.sarhan@trendyol.com>
2022-01-07 13:42:00 +03:00
Julian Lawrence
77cb406052 updated until -> sliding until 2022-01-06 12:55:10 -08:00
RyanDevlin
921a5680ab Eliminated race condition in E2E tests 2022-01-06 09:36:13 -05:00
babygoat
1529180d70 feat: support eviction of failed bare pods
This patch adds the policy option (evictFailedBarePods) to allow failed
pods without ownerReferences to be evicted. For backward compatibility,
the option is disabled by default. Addresses #644.
2022-01-06 01:07:41 +08:00
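For reference, a minimal policy sketch (illustrative only, not taken from this changeset; the top-level placement of the flag follows the README changes shown further below) of how the new option is switched on:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
# Allow failed pods that have no ownerReferences to be evicted (disabled by default).
evictFailedBarePods: true
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
```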
Kubernetes Prow Robot
2d9143d129 Merge pull request #687 from jklaw90/error-comment
Comment update for metrics
2022-01-04 06:52:52 -08:00
Kubernetes Prow Robot
e9c0833b6f Merge pull request #689 from ingvagabund/run-hack-update-generated-conversions-sh
run ./hack/update-* scripts
2022-01-04 06:34:52 -08:00
Jan Chaloupka
8462cf56d7 run ./hack/update-* scripts 2022-01-04 09:37:01 +01:00
Julian Lawrence
a60d6a527d updated comment to reflect actual value 2021-12-29 10:56:10 -08:00
Kubernetes Prow Robot
2b23694704 Merge pull request #682 from jklaw90/chart-labels
commonLabels value for chart
2021-12-26 06:15:15 -08:00
Julian Lawrence
d0a95bee2f fixed default value for common labels 2021-12-20 08:24:30 -08:00
Julian Lawrence
57a910f5d1 adding commonLabels value 2021-12-18 23:31:52 -08:00
Kubernetes Prow Robot
ccaedde183 Merge pull request #661 from kirecek/enhc/include-pod-reason
Add pod.Status.Reason to the list of reasons
2021-12-17 13:11:55 -08:00
Erik Jankovič
2020642b6f chore: add pod.Status.Reason to the list of reasons
Signed-off-by: Erik Jankovič <erik.jankovic@gmail.com>
2021-12-17 18:37:53 +01:00
Kubernetes Prow Robot
96ff5d2dd9 Merge pull request #680 from ingvagabund/klog-output-stdout
Set the klog output to stdout by default
2021-12-16 05:31:18 -08:00
Jan Chaloupka
d8718d7db3 Set the klog output to stdout by default
Also, one needs to set --logtostderr=false to properly log into the stdout
2021-12-16 11:22:40 +01:00
Kubernetes Prow Robot
1e5165ba9f Merge pull request #670 from autumn0207/improve_pod_eviction_metrics
Add node name label to the counter metric for evicted pods
2021-12-16 01:49:18 -08:00
autumn0207
8e74f8bd77 improve pod eviction metrics 2021-12-16 17:06:22 +08:00
Kubernetes Prow Robot
2424928019 Merge pull request #667 from damemi/1.23-rc.0
bump: k8s to 1.23
2021-12-15 06:56:20 -08:00
Jan Chaloupka
e6314d2c7e Init the klog directly
Since 3948cb8d1b (diff-465167b08358906be13f9641d4798c6e8ad0790395e045af8ace4d08223fa922R78)
the klog verbosity level is always overridden.
2021-12-15 09:23:20 -05:00
Kubernetes Prow Robot
271ee3c7e3 Merge pull request #678 from a7i/golangci-fix
fix: install golangci from the golangci repo
2021-12-15 02:20:19 -08:00
Amir Alavi
e58686c142 fix: install golangci from the golangci repo 2021-12-14 13:18:19 -05:00
Kubernetes Prow Robot
0b2c10d6ce Merge pull request #673 from Garrybest/pr_pod_cache
list pods assigned to a node by pod informer cache
2021-12-14 01:32:04 -08:00
Garrybest
cac3b9185b reform all test files
Signed-off-by: Garrybest <garrybest@foxmail.com>
2021-12-11 19:43:16 +08:00
Mike Dame
94888e653c Move klog initialization to cli.Run() 2021-12-10 12:00:11 -05:00
Mike Dame
936578b238 Update k8s version in helm test 2021-12-10 10:14:47 -05:00
Mike Dame
4fa7bf978c run hack/update-generated-deep-copies.sh 2021-12-10 10:02:39 -05:00
Mike Dame
2f7c496944 React to 1.23 bump
Logging validation functions changed in upstream commit
54ecfcdac8.
This uses the new function name.
2021-12-10 10:02:26 -05:00
Mike Dame
5fe3ca86ff bump: k8s to 1.23 2021-12-10 10:02:14 -05:00
Garrybest
0ff8ecb41e reform all strategies by using getPodsAssignedToNode
Signed-off-by: Garrybest <garrybest@foxmail.com>
2021-12-10 19:28:51 +08:00
Garrybest
08ed129a07 reform ListPodsOnANode by using pod informer and indexer
Signed-off-by: Garrybest <garrybest@foxmail.com>
2021-12-10 19:25:20 +08:00
Kubernetes Prow Robot
49ad197dfc Merge pull request #658 from JaneLiuL/master
Add maxNoOfPodsToEvictPerNamespace policy
2021-12-03 01:50:27 -08:00
Kubernetes Prow Robot
82201d0e48 Add maxNoOfPodsToEvictPerNamespace policy 2021-12-03 10:58:37 +08:00
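A sketch of how the new limit could sit next to the existing per-node cap (illustrative; it assumes maxNoOfPodsToEvictPerNamespace is a top-level policy field like maxNoOfPodsToEvictPerNode, and the value 10 is arbitrary):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
maxNoOfPodsToEvictPerNode: 40
# New field: cap the number of evictions per namespace, analogous to the per-node limit.
maxNoOfPodsToEvictPerNamespace: 10
strategies:
  "RemoveDuplicates":
    enabled: true
```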
Kubernetes Prow Robot
2b95332e8c Merge pull request #665 from spiffxp/use-k8s-infra-for-gcb-image
images: use k8s-staging-test-infra/gcb-docker-gcloud
2021-11-30 13:59:01 -08:00
Aaron Crickenberger
e8ed62e540 images: use k8s-staging-test-infra/gcb-docker-gcloud 2021-11-30 13:12:18 -08:00
Kubernetes Prow Robot
e5725de7bb Merge pull request #664 from stpabhi/dev
fix typo minPodLifeTimeSeconds
2021-11-30 08:10:56 -08:00
Abhilash Pallerlamudi
c47e811937 fix typo minPodLifeTimeSeconds
Signed-off-by: Abhilash Pallerlamudi <stp.abhi@gmail.com>
2021-11-29 17:51:40 -08:00
Kubernetes Prow Robot
e0bac4c371 Merge pull request #662 from ingvagabund/drop-deprecated-flags
Drop deprecated flags
2021-11-29 08:43:23 -08:00
Jan Chaloupka
73a7adf572 Drop deprecated flags 2021-11-29 17:12:59 +01:00
Kubernetes Prow Robot
5cf381a817 Merge pull request #663 from ingvagabund/bump-go-to-1.17
Bump go version in go.mod to go1.17
2021-11-29 08:01:23 -08:00
Jan Chaloupka
4603182320 Bump go version in go.mod to go1.17 2021-11-29 16:49:35 +01:00
Martin Magakian
ad207775ff Adding 'affinity' support to run 'descheduler' in CronJob or Deployment 2021-11-18 11:08:36 +01:00
Kubernetes Prow Robot
d0f11a41c0 Merge pull request #639 from JaneLiuL/master
Ignore Pods With Deletion Timestamp
2021-11-15 08:44:49 -08:00
Jane Liu L
c7524705b3 Ignore Pods With Deletion Timestamp 2021-11-10 09:32:11 +08:00
Kubernetes Prow Robot
50f9513cbb Merge pull request #642 from wking/clarify-RemovePodsHavingTooManyRestarts
README: Clarify podRestartThreshold applying to the sum over containers
2021-10-13 03:15:49 -07:00
W. Trevor King
6fd80ba29c README: Clarify podRestartThreshold applying to the sum over containers
calcContainerRestarts sums over containers. The new language makes
that clear, avoiding potential confusion vs. an alternative that looked
for pods where a single container had passed the configured threshold.
For example, with three containers with 50 restarts each and a threshold of
100, the actual "sum over containers" logic makes that pod a candidate
for descheduling, but the "largest single container restart count"
hypothetical would not have made it a candidate.

Also shifts labelSelector into the parameter table, because when it
was added in 29ade13ce7 (README and e2e-testcase add for
labelSelector, 2021-03-02, #510), it landed a few lines too high.
2021-10-07 14:51:26 -07:00
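To make the summed-restarts semantics concrete, a strategy configuration along these lines (an illustrative sketch; parameter names as documented in the README) would treat the three-container example above, 50 + 50 + 50 = 150 restarts, as exceeding the threshold of 100:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        # Restart counts are summed over all eligible containers before the comparison.
        podRestartThreshold: 100
        includingInitContainers: true
```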
Kubernetes Prow Robot
5b557941fa Merge pull request #627 from JaneLiuL/master
Add E2E test case cover duplicatepods strategy
2021-10-01 00:01:22 -07:00
Kubernetes Prow Robot
c6229934a0 Merge pull request #637 from KohlsTechnology/helm-suspend-docs
Document suspend helm chart configuration option
2021-09-30 23:47:22 -07:00
Sean Malloy
ed28eaeccc Document suspend helm chart configuration option 2021-09-30 23:30:10 -05:00
Kubernetes Prow Robot
3be910c238 Merge pull request #621 from uthark/oatamanenko/deleted
Ignore pods being deleted
2021-09-30 21:21:22 -07:00
Kubernetes Prow Robot
d96dd6da2d Merge pull request #632 from a7i/amir/failedpods-crash
RemoveFailedPods: guard against nil descheduler strategy (e.g. in case of default that loads all strategies)
2021-09-29 01:02:49 -07:00
Amir Alavi
f7c26ef41f e2e tests for RemoveFailedPods strategy
Fix priority class default
2021-09-26 20:39:32 -04:00
Jane Liu L
57ad9cc91b Add E2E test case cover tooManyRestarts strategy 2021-09-26 09:10:17 +08:00
Kubernetes Prow Robot
926339594d Merge pull request #622 from yutachaos/feature/added_suspend_parameter
Added support for cronjob suspend
2021-09-22 09:18:01 -07:00
Amir Alavi
1ba53ad68c e2e TestTopologySpreadConstraint: ensure pods are running before checking for topology spread across domains 2021-09-20 18:18:47 -04:00
Amir Alavi
6eb37ce079 RemoveFailedPods: guard against nil descheduler strategy (e.g. in case of default that loads all strategies) 2021-09-20 11:20:54 -04:00
Kubernetes Prow Robot
54d660eee0 Merge pull request #629 from chenkaiyue/fix-node-affinity-test
fix duplicate code in node_affinity_test.go
2021-09-16 01:59:46 -07:00
yutachaos
cf219fbfae Added helm chart suspend parameter
Signed-off-by: yutachaos <18604471+yutachaos@users.noreply.github.com>
2021-09-16 14:32:09 +09:00
kaiyuechen
d1d9ea0c48 fix duplicate code in node_affinity_test.go 2021-09-16 10:39:52 +08:00
Oleg Atamanenko
4448d9c670 Ignore pods being deleted 2021-09-15 00:05:51 -07:00
Kubernetes Prow Robot
3909f3acae Merge pull request #623 from damemi/release-1.22
Update Helm chart version to 0.22.0
2021-09-08 13:13:56 -07:00
Mike Dame
9f1274f3ab Update Helm chart version to 0.22.0 2021-09-08 16:00:56 -04:00
Kubernetes Prow Robot
e6926e11ea Merge pull request #617 from KohlsTechnology/docs-1.22
Update Docs and Manifests for v0.22.0
2021-08-31 09:31:37 -07:00
Sean Malloy
16228c9dd1 Update Docs and Manifests for v0.22.0
* Added v0.22 references to README
* Update k8s manifests with v0.22.0 references
* Added table with list of supported architectures by release
2021-08-31 00:24:18 -05:00
Kubernetes Prow Robot
57dabbca5c Merge pull request #597 from a7i/amir/e2e-topology
Add e2e tests for TopologySpreadConstraint
2021-08-30 22:00:28 -07:00
Kubernetes Prow Robot
04439c6e64 Merge pull request #610 from a7i/failed-pods
Introduce RemoveFailedPods strategy
2021-08-30 11:30:09 -07:00
Amir Alavi
0e0e688fe8 Introduce RemoveFailedPods strategy 2021-08-30 14:17:52 -04:00
Kubernetes Prow Robot
0603de4353 Merge pull request #613 from derdanne/derdanne/highnodeutilization-readme-591
nodeutilization strategy: "numberOfNodes" should work as documented
2021-08-24 08:29:14 -07:00
Kubernetes Prow Robot
ea911db6dc Merge pull request #615 from KohlsTechnology/bump-1.22
Bump To k8s 1.22.0
2021-08-19 07:07:24 -07:00
Sean Malloy
c079c7aaae Bump To k8s 1.22.0 2021-08-19 00:40:38 -05:00
Kubernetes Prow Robot
5420988a28 Merge pull request #614 from KohlsTechnology/bump-go-version
Bump To Go 1.16.7
2021-08-18 02:10:08 -07:00
Sean Malloy
b56a1ab80a Bump To Go 1.16.7
The main k/k repo was updated to Go 1.16.7 for k8s
v1.22.0 release. See below PR for reference.

https://github.com/kubernetes/kubernetes/pull/104200
2021-08-17 23:47:45 -05:00
Daniel Klockenkämper
a6b34c1130 nodeutilization strategy: "numberOfNodes" should work as documented 2021-08-17 13:29:07 +00:00
Amir Alavi
0de8002b7d Update gce scripts to spread nodes over 2 zones 2021-08-16 22:41:37 -04:00
Amir Alavi
84d648ff60 Add e2e tests for TopologySpreadConstraint 2021-08-16 22:39:31 -04:00
Kubernetes Prow Robot
6ad6f6fce5 Merge pull request #592 from chrisjohnson00/issue-591
docs: adding clarification to HighNodeUtilization's purpose
2021-08-12 21:38:22 -07:00
Kubernetes Prow Robot
b7100ad871 Merge pull request #582 from praxist/helm-deployment
Update helm chart for running as deployment
2021-08-12 07:29:47 -07:00
Matthew Leung
38c0f1c639 Update helm chart for running as deployment 2021-08-11 11:56:14 -07:00
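As a usage sketch, the Deployment mode added here could be selected with a small values override (hypothetical file name my-values.yaml; field names follow the chart README and values.yaml diffs further below):

```yaml
# Run the descheduler chart as a Deployment instead of the default CronJob.
kind: Deployment
# Required when kind is Deployment: time between consecutive descheduler runs.
deschedulingInterval: 10m
cmdOptions:
  v: 3
```

Something like `helm install descheduler ./charts/descheduler -f my-values.yaml` would then render the deployment template instead of the cronjob one.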
Kubernetes Prow Robot
64d7901d82 Merge pull request #602 from a7i/balance-domains-aboveavg
TopologySpreadConstraint: advance above avg index when at ideal average
2021-08-10 05:47:25 -07:00
Kubernetes Prow Robot
ab1015e5fa Merge pull request #599 from a7i/amir/update-gce-images
Update GCE images to Ubuntu 18.04 LTS
2021-08-10 05:47:18 -07:00
Kubernetes Prow Robot
1753bf4422 Merge pull request #611 from a7i/docs-remove-redundant-strategies
Update README to remove redundant list of strategies
2021-08-02 08:11:22 -07:00
Amir Alavi
7cb44dca27 Update README to remove redundant list of strategies 2021-07-30 16:35:00 -04:00
Amir Alavi
2ec4b8b674 Update GCE images to Ubuntu 18.04 LTS 2021-07-30 16:14:20 -04:00
Kubernetes Prow Robot
0d0633488d Merge pull request #598 from jayonlau/clenup
Clean up extra spaces
2021-07-30 11:11:37 -07:00
Kubernetes Prow Robot
f3b3853d9d Merge pull request #562 from wsscc2021/helm-chart-tolerations
Add tolerations to cronjob in helm chart
2021-07-30 10:43:38 -07:00
Kubernetes Prow Robot
e550e5e22a Merge pull request #600 from a7i/topology-example
Add example for RemovePodsViolatingTopologySpreadConstraint
2021-07-28 05:37:34 -07:00
Kubernetes Prow Robot
2bf37ff495 Merge pull request #608 from a7i/master
Remove kind binary from repo
2021-07-28 01:13:35 -07:00
Amir Alavi
fa84ec6774 Remove kind binary from repo and add to gitignore 2021-07-27 08:28:55 -04:00
Kubernetes Prow Robot
e18e0416b1 Merge pull request #607 from a7i/fix-helm-test
Place bash shebang at the top of the script + Ensure Helm installed for run-helm-tests
2021-07-27 00:40:45 -07:00
Amir Alavi
34282162f8 Wait for job to start/finish in helm-test script 2021-07-26 23:53:36 -04:00
Amir Alavi
7a043d31be Ensure helm is installed and move shebang to top of file 2021-07-26 16:16:20 -04:00
Amir Alavi
b0e5d64bd7 TopologySpreadConstraint: advance above avg index when at ideal average when balancing domains 2021-07-13 22:55:10 -04:00
Amir Alavi
1c9ac2daee Add example for RemovePodsViolatingTopologySpreadConstraint 2021-07-09 22:56:59 -04:00
jayonlau
c6b67e8a6f Clean up extra spaces
Clean up extra spaces; although these errors are not important, they affect the coding style.
2021-07-09 15:40:35 +08:00
Kubernetes Prow Robot
2e4873d103 Merge pull request #594 from ikarldasan/master-to-main
Rename master to main
2021-07-06 05:50:18 -07:00
Karl Dasan
3e483c4d85 Rename master to main 2021-07-01 13:48:50 +05:30
Kubernetes Prow Robot
032ea70380 Merge pull request #549 from pravarag/verify-defaulters-gen
Add verify scripts for defaulters generator
2021-06-30 04:05:03 -07:00
Pravar Agrawal
df84dc4548 add verify script for defaulters gen 2021-06-30 09:41:43 +05:30
Chris Johnson
d4fa83f8bc chore: PR feedback 2021-06-25 12:30:28 -07:00
Chris Johnson
448dbceadd docs: adding clarification to HighNodeUtilization's purpose 2021-06-24 16:20:21 -07:00
Kubernetes Prow Robot
b83b064992 Merge pull request #589 from a7i/remove-kind-bin
Remove kind binary from repo
2021-06-16 00:19:59 -07:00
Amir Alavi
133a0049e3 Remove kind binary from repo 2021-06-15 18:29:06 -04:00
Kubernetes Prow Robot
50b1c1337d Merge pull request #561 from mekza/support_image_pull_secrets
Add support for private registry creds
2021-06-10 23:28:58 -07:00
Martin-Zack Mekkaoui
d5deed44ca Add support for private registry creds 2021-06-09 22:11:37 +02:00
Kubernetes Prow Robot
0f785b9530 Merge pull request #584 from damemi/update-go-badge
Update Go report card badge
2021-06-08 10:48:08 -07:00
Mike Dame
eb1f0ecc14 Update Go report card badge 2021-06-08 13:39:53 -04:00
Donggeun Lee
4ba48b018c Add tolerations to cronjob in helm chart 2021-05-04 19:56:08 +09:00
2396 changed files with 215372 additions and 63848 deletions

.gitignore

@@ -4,3 +4,4 @@ vendordiff.patch
 .idea/
 *.code-workspace
 .vscode/
+kind


@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.16.1
+FROM golang:1.17.3
 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .


@@ -24,8 +24,8 @@ ARCHS = amd64 arm arm64
 LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
-GOLANGCI_VERSION := v1.30.0
+GOLANGCI_VERSION := v1.43.0
-HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint)
+HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
 # REGISTRY is the container registry to push
 # into. The default is to push to the staging
@@ -43,7 +43,7 @@ IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
 # In the future binaries can be uploaded to
 # GCS bucket gs://k8s-staging-descheduler.
-HAS_HELM := $(shell which helm)
+HAS_HELM := $(shell which helm 2> /dev/null)
 all: build
@@ -127,18 +127,21 @@ gen:
 verify-gen:
 ./hack/verify-conversions.sh
 ./hack/verify-deep-copies.sh
+./hack/verify-defaulters.sh
 lint:
 ifndef HAS_GOLANGCI
-curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
+curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
 endif
 ./_output/bin/golangci-lint run
-lint-chart:
+lint-chart: ensure-helm-install
-ifndef HAS_HELM
-curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
-endif
 helm lint ./charts/descheduler
-test-helm:
+test-helm: ensure-helm-install
 ./test/run-helm-tests.sh
+ensure-helm-install:
+ifndef HAS_HELM
+curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
+endif

README.md

@@ -1,4 +1,4 @@
-[![Go Report Card](https://goreportcard.com/badge/kubernetes-sigs/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
+[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
 ![Release Charts](https://github.com/kubernetes-sigs/descheduler/workflows/Release%20Charts/badge.svg)
 # Descheduler for Kubernetes
@@ -42,6 +42,7 @@ Table of Contents
 - [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
 - [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
 - [PodLifeTime](#podlifetime)
+- [RemoveFailedPods](#removefailedpods)
 - [Filter Pods](#filter-pods)
 - [Namespace filtering](#namespace-filtering)
 - [Priority filtering](#priority-filtering)
@@ -102,17 +103,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
 Run As A Job
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.21.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.22.0' | kubectl apply -f -
 ```
 Run As A CronJob
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.21.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.22.0' | kubectl apply -f -
 ```
 Run As A Deployment
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.21.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.22.0' | kubectl apply -f -
 ```
 ## User Guide
@@ -121,36 +122,28 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.
 ## Policy and Strategies
-Descheduler's policy is configurable and includes strategies that can be enabled or disabled.
+Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.
-Nine strategies
-1. `RemoveDuplicates`
-2. `LowNodeUtilization`
-3. `HighNodeUtilization`
-4. `RemovePodsViolatingInterPodAntiAffinity`
-5. `RemovePodsViolatingNodeAffinity`
-6. `RemovePodsViolatingNodeTaints`
-7. `RemovePodsViolatingTopologySpreadConstraint`
-8. `RemovePodsHavingTooManyRestarts`
-9. `PodLifeTime`
-are currently implemented. As part of the policy, the
-parameters associated with the strategies can be configured too. By default, all strategies are enabled.
-The following diagram provides a visualization of most of the strategies to help
+The policy includes a common configuration that applies to all the strategies:
-categorize how strategies fit together.
+| Name | Default Value | Description |
+|------|---------------|-------------|
+| `nodeSelector` | `nil` | limiting the nodes which are processed |
+| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
+| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
+| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
+| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
+| `evictFailedBarePods` | `false` | allow eviction of pods without owner references and in failed phase |
-![Strategies diagram](strategies_diagram.png)
+As part of the policy, the parameters associated with each strategy can be configured.
+See each strategy for details on available parameters.
-The policy also includes common configuration for all the strategies:
+**Policy:**
-- `nodeSelector` - limiting the nodes which are processed
-- `evictLocalStoragePods` - allows eviction of pods with local storage
-- `evictSystemCriticalPods` - [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns
-- `ignorePvcPods` - set whether PVC pods should be evicted or ignored (defaults to `false`)
-- `maxNoOfPodsToEvictPerNode` - maximum number of pods evicted from each node (summed through all strategies)
 ```
 apiVersion: "descheduler/v1alpha1"
 kind: "DeschedulerPolicy"
 nodeSelector: prod=dev
+evictFailedBarePods: false
 evictLocalStoragePods: true
 evictSystemCriticalPods: true
 maxNoOfPodsToEvictPerNode: 40
@@ -159,6 +152,11 @@ strategies:
 ...
 ```
+The following diagram provides a visualization of most of the strategies to help
+categorize how strategies fit together.
+![Strategies diagram](strategies_diagram.png)
 ### RemoveDuplicates
 This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
@@ -267,9 +265,11 @@ under utilized frequently or for a short period of time. By default, `numberOfNo
 ### HighNodeUtilization
-This strategy finds nodes that are under utilized and evicts pods in the hope that these pods will be scheduled compactly into fewer nodes.
+This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
-This strategy **must** be used with the
+scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
-scheduler strategy `MostRequestedPriority`. The parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.
+trigger down scaling of under utilized nodes.
+This strategy **must** be used with the scheduler strategy `MostRequestedPriority`. The parameters of this strategy are
+configured under `nodeResourceUtilizationThresholds`.
 The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
 `thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
@@ -458,9 +458,9 @@ strategies:
 This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that
 can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters
-include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`,
+include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
-which determines whether init container restarts should be factored into that calculation.
+should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
-|`labelSelector`|(see [label filtering](#label-filtering))|
+into that calculation.
 **Parameters:**
@@ -471,6 +471,7 @@ which determines whether init container restarts should be factored into that ca
 |`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
 |`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
+|`labelSelector`|(see [label filtering](#label-filtering))|
 |`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
 **Example:**
@@ -520,6 +521,47 @@ strategies:
 - "Pending"
 ```
+### RemoveFailedPods
+This strategy evicts pods that are in failed status phase.
+You can provide an optional parameter to filter by failed `reasons`.
+`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
+You can specify an optional parameter `minPodLifetimeSeconds` to evict pods that are older than specified seconds.
+Lastly, you can specify the optional parameter `excludeOwnerKinds` and if a pod
+has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
+**Parameters:**
+|Name|Type|
+|---|---|
+|`minPodLifetimeSeconds`|uint|
+|`excludeOwnerKinds`|list(string)|
+|`reasons`|list(string)|
+|`includingInitContainers`|bool|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+|`namespaces`|(see [namespace filtering](#namespace-filtering))|
+|`labelSelector`|(see [label filtering](#label-filtering))|
+|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
+**Example:**
+```yaml
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+"RemoveFailedPods":
+enabled: true
+params:
+failedPods:
+reasons:
+- "NodeAffinity"
+includingInitContainers: true
+excludeOwnerKinds:
+- "Job"
+minPodLifetimeSeconds: 3600
+```
 ## Filter Pods
 ### Namespace filtering
@@ -532,6 +574,7 @@ The following strategies accept a `namespaces` parameter which allows to specify
 * `RemovePodsViolatingInterPodAntiAffinity`
 * `RemoveDuplicates`
 * `RemovePodsViolatingTopologySpreadConstraint`
+* `RemoveFailedPods`
 For example:
@@ -614,7 +657,7 @@ does not exist, descheduler won't create it and will throw an error.
 ### Label filtering
-The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#labelselector-v1-meta)
+The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta)
 to filter pods by their labels:
 * `PodLifeTime`
@@ -623,6 +666,7 @@ to filter pods by their labels:
 * `RemovePodsViolatingNodeAffinity`
 * `RemovePodsViolatingInterPodAntiAffinity`
 * `RemovePodsViolatingTopologySpreadConstraint`
+* `RemoveFailedPods`
 This allows running strategies among pods the descheduler is interested in.
@@ -657,6 +701,7 @@ The following strategies accept a `nodeFit` boolean parameter which can optimize
 * `RemovePodsViolatingNodeTaints`
 * `RemovePodsViolatingTopologySpreadConstraint`
 * `RemovePodsHavingTooManyRestarts`
+* `RemoveFailedPods`
 If set to `true` the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently the following criteria are considered when setting `nodeFit` to `true`:
 - A `nodeSelector` on the pod
@@ -697,8 +742,8 @@ Using Deployments instead of ReplicationControllers provides an automated rollou
 When the descheduler decides to evict pods from a node, it employs the following general mechanism:
 * [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
-* Pods (static or mirrored pods or stand alone pods) not part of an ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are
+* Pods (static or mirrored pods or standalone pods) not part of an ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are
-never evicted because these pods won't be recreated.
+never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`)
 * Pods associated with DaemonSets are never evicted.
 * Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
 * Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
@@ -707,6 +752,7 @@ best effort pods are evicted before burstable and guaranteed pods.
 * All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
 annotation is used to override checks which prevent eviction and users can select which pod is evicted.
 Users should know how and if the pod will be recreated.
+* Pods with a non-nil DeletionTimestamp are not evicted by default.
 Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.
@@ -736,6 +782,7 @@ packages that it is compiled with.
 Descheduler | Supported Kubernetes Version
 -------------|-----------------------------
+v0.22 | v1.22
 v0.21 | v1.21
 v0.20 | v1.20
 v0.19 | v1.19


@@ -1,7 +1,7 @@
 apiVersion: v1
 name: descheduler
-version: 0.21.0
+version: 0.23.0
-appVersion: 0.21.0
+appVersion: 0.23.0
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:
 - kubernetes


@@ -45,9 +45,11 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | Parameter | Description | Default |
 | ------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
+| `kind` | Use as CronJob or Deployment | `CronJob` |
 | `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
 | `image.tag` | Docker tag to use | `v[chart appVersion]` |
 | `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
+| `imagePullSecrets` | Docker repository secrets | `[]` |
 | `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
 | `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
 | `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
@@ -55,6 +57,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
 | `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
 | `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
+| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
 | `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
 | `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
 | `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
@@ -64,3 +67,6 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
 | `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
 | `nodeSelector` | Node selectors to run the descheduler cronjob on specific nodes | `nil` |
+| `tolerations` | tolerations to run the descheduler cronjob on specific nodes | `nil` |
+| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
+| `commonLabels` | Labels to apply to all resources | `{}` |


@@ -1 +1 @@
-Descheduler installed as a cron job.
+Descheduler installed as a {{ .Values.kind }} .


@@ -42,6 +42,17 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
 {{- end }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- if .Values.commonLabels}}
+{{ toYaml .Values.commonLabels }}
+{{- end }}
+{{- end -}}
+{{/*
+Selector labels
+*/}}
+{{- define "descheduler.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "descheduler.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
 {{- end -}}
 {{/*


@@ -1,3 +1,4 @@
+{{- if eq .Values.kind "CronJob" }}
 apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
 kind: CronJob
 metadata:
@@ -6,6 +7,9 @@ metadata:
 {{- include "descheduler.labels" . | nindent 4 }}
 spec:
 schedule: {{ .Values.schedule | quote }}
+{{- if .Values.suspend }}
+suspend: {{ .Values.suspend }}
+{{- end }}
 concurrencyPolicy: "Forbid"
 {{- if .Values.startingDeadlineSeconds }}
 startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
@@ -27,8 +31,7 @@ spec:
 {{- .Values.podAnnotations | toYaml | nindent 12 }}
 {{- end }}
 labels:
-app.kubernetes.io/name: {{ include "descheduler.name" . }}
+{{- include "descheduler.selectorLabels" . | nindent 12 }}
-app.kubernetes.io/instance: {{ .Release.Name }}
 {{- if .Values.podLabels }}
 {{- .Values.podLabels | toYaml | nindent 12 }}
 {{- end }}
@@ -37,11 +40,23 @@ spec:
 nodeSelector:
 {{- toYaml . | nindent 12 }}
 {{- end }}
+{{- with .Values.affinity }}
+affinity:
+{{- toYaml . | nindent 12 }}
+{{- end }}
+{{- with .Values.tolerations }}
+tolerations:
+{{- toYaml . | nindent 12 }}
+{{- end }}
 {{- if .Values.priorityClassName }}
 priorityClassName: {{ .Values.priorityClassName }}
 {{- end }}
 serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
 restartPolicy: "Never"
+{{- with .Values.imagePullSecrets }}
+imagePullSecrets:
+{{- toYaml . | nindent 10 }}
+{{- end }}
 containers:
 - name: {{ .Chart.Name }}
 image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
@@ -57,6 +72,8 @@ spec:
 - {{ $value | quote }}
 {{- end }}
 {{- end }}
+livenessProbe:
+{{- toYaml .Values.livenessProbe | nindent 16 }}
 resources:
 {{- toYaml .Values.resources | nindent 16 }}
 securityContext:
@@ -74,3 +91,4 @@ spec:
 - name: policy-volume
 configMap:
 name: {{ template "descheduler.fullname" . }}
+{{- end }}


@@ -0,0 +1,81 @@
+{{- if eq .Values.kind "Deployment" }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+name: {{ template "descheduler.fullname" . }}
+labels:
+{{- include "descheduler.labels" . | nindent 4 }}
+spec:
+replicas: 1
+selector:
+matchLabels:
+{{- include "descheduler.selectorLabels" . | nindent 6 }}
+template:
+metadata:
+labels:
+{{- include "descheduler.selectorLabels" . | nindent 8 }}
+{{- if .Values.podLabels }}
+{{- .Values.podLabels | toYaml | nindent 8 }}
+{{- end }}
+annotations:
+checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+{{- if .Values.podAnnotations }}
+{{- .Values.podAnnotations | toYaml | nindent 8 }}
+{{- end }}
+spec:
+{{- if .Values.priorityClassName }}
+priorityClassName: {{ .Values.priorityClassName }}
+{{- end }}
+serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
+containers:
+- name: {{ .Chart.Name }}
+image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
+imagePullPolicy: {{ .Values.image.pullPolicy }}
+command:
+- "/bin/descheduler"
+args:
+- "--policy-config-file"
+- "/policy-dir/policy.yaml"
+- "--descheduling-interval"
+- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
+{{- range $key, $value := .Values.cmdOptions }}
+- {{ printf "--%s" $key | quote }}
+{{- if $value }}
+- {{ $value | quote }}
+{{- end }}
+{{- end }}
+ports:
+- containerPort: 10258
+protocol: TCP
+livenessProbe:
+{{- toYaml .Values.livenessProbe | nindent 12 }}
+resources:
+{{- toYaml .Values.resources | nindent 12 }}
+securityContext:
+allowPrivilegeEscalation: false
+capabilities:
+drop:
+- ALL
+privileged: false
+readOnlyRootFilesystem: true
+runAsNonRoot: true
+volumeMounts:
+- mountPath: /policy-dir
+name: policy-volume
+volumes:
+- name: policy-volume
+configMap:
+name: {{ template "descheduler.fullname" . }}
+{{- with .Values.nodeSelector }}
+nodeSelector:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+{{- with .Values.affinity }}
+affinity:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+{{- with .Values.tolerations }}
+tolerations:
+{{- toYaml . | nindent 8 }}
+{{- end }}
+{{- end }}


@@ -5,4 +5,7 @@ metadata:
 name: {{ template "descheduler.serviceAccountName" . }}
 labels:
 {{- include "descheduler.labels" . | nindent 4 }}
+{{- if .Values.serviceAccount.annotations }}
+annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
+{{- end }}
 {{- end -}}


@@ -2,12 +2,17 @@
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.
+# CronJob or Deployment
+kind: CronJob
 image:
 repository: k8s.gcr.io/descheduler/descheduler
 # Overrides the image tag whose default is the chart version
 tag: ""
 pullPolicy: IfNotPresent
+imagePullSecrets: []
 resources:
 requests:
 cpu: 500m
@@ -19,12 +24,19 @@ resources:
 nameOverride: ""
 fullnameOverride: ""
+# labels that'll be applied to all resources
+commonLabels: {}
 cronJobApiVersion: "batch/v1" # Use "batch/v1beta1" for k8s version < 1.21.0. TODO(@7i) remove with 1.23 release
 schedule: "*/2 * * * *"
+suspend: false
 #startingDeadlineSeconds: 200
 #successfulJobsHistoryLimit: 1
 #failedJobsHistoryLimit: 1
+# Required when running as a Deployment
+deschedulingInterval: 5m
 cmdOptions:
 v: 3
 # evict-local-storage-pods:
@@ -62,6 +74,23 @@ priorityClassName: system-cluster-critical
 nodeSelector: {}
 # foo: bar
+affinity: {}
+# nodeAffinity:
+# requiredDuringSchedulingIgnoredDuringExecution:
+# nodeSelectorTerms:
+# - matchExpressions:
+# - key: kubernetes.io/e2e-az-name
+# operator: In
+# values:
+# - e2e-az1
+# - e2e-az2
+tolerations: []
+# - key: 'management'
+# operator: 'Equal'
+# value: 'tool'
+# effect: 'NoSchedule'
 rbac:
 # Specifies whether RBAC resources should be created
 create: true
@@ -76,3 +105,15 @@ serviceAccount:
 # The name of the ServiceAccount to use.
 # If not set and create is true, a name is generated using the fullname template
 name:
+# Specifies custom annotations for the serviceAccount
+annotations: {}
+livenessProbe:
+failureThreshold: 3
+httpGet:
+path: /healthz
+port: 10258
+scheme: HTTPS
+initialDelaySeconds: 3
+periodSeconds: 10


@@ -7,7 +7,7 @@ timeout: 1200s
 options:
 substitution_option: ALLOW_LOOSE
 steps:
-- name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4'
+- name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90'
 entrypoint: make
 env:
 - DOCKER_CLI_EXPERIMENTAL=enabled


@@ -20,10 +20,8 @@ package options
 import (
 "github.com/spf13/pflag"
-utilerrors "k8s.io/apimachinery/pkg/util/errors"
 apiserveroptions "k8s.io/apiserver/pkg/server/options"
 clientset "k8s.io/client-go/kubernetes"
-"k8s.io/component-base/logs"
 "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
 "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
@@ -39,7 +37,6 @@ type DeschedulerServer struct {
 componentconfig.DeschedulerConfiguration
 Client clientset.Interface
-Logs *logs.Options
 SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
 DisableMetrics bool
 }
@@ -56,18 +53,10 @@ func NewDeschedulerServer() (*DeschedulerServer, error) {
 return &DeschedulerServer{
 DeschedulerConfiguration: *cfg,
-Logs: logs.NewOptions(),
 SecureServing: secureServing,
 }, nil
 }
-// Validation checks for DeschedulerServer.
-func (s *DeschedulerServer) Validate() error {
-var errs []error
-errs = append(errs, s.Logs.Validate()...)
-return utilerrors.NewAggregate(errs)
-}
 func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
 versionedCfg := v1alpha1.DeschedulerConfiguration{}
 deschedulerscheme.Scheme.Default(&versionedCfg)
@@ -85,12 +74,6 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
 fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
 fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
 fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
-// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
-fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
-// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
-fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler")
-// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
-fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "DEPRECATED: enables evicting pods using local storage by descheduler")
 fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
 rs.SecureServing.AddFlags(fs)


@@ -19,8 +19,11 @@ package app
 import (
 "context"
-"flag"
 "io"
+"os/signal"
+"syscall"
+"k8s.io/apiserver/pkg/server/healthz"
 "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 "sigs.k8s.io/descheduler/pkg/descheduler"
@@ -30,7 +33,9 @@ import (
 apiserver "k8s.io/apiserver/pkg/server"
 "k8s.io/apiserver/pkg/server/mux"
 restclient "k8s.io/client-go/rest"
-aflag "k8s.io/component-base/cli/flag"
+"k8s.io/component-base/config"
+_ "k8s.io/component-base/logs/json/register"
+"k8s.io/component-base/logs/registry"
 "k8s.io/component-base/metrics/legacyregistry"
 "k8s.io/klog/v2"
 )
@@ -48,8 +53,7 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
 Short: "descheduler",
 Long: `The descheduler evicts pods which may be bound to less desired nodes`,
 Run: func(cmd *cobra.Command, args []string) {
-s.Logs.LogFormat = s.Logging.Format
+// s.Logs.Config.Format = s.Logging.Format
-s.Logs.Apply()
 // LoopbackClientConfig is a config for a privileged loopback connection
 var LoopbackClientConfig *restclient.Config
@@ -59,23 +63,30 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
 return
 }
-if err := s.Validate(); err != nil {
+factory, _ := registry.LogRegistry.Get(s.Logging.Format)
-klog.ErrorS(err, "failed to validate server configuration")
+if factory == nil {
+klog.ClearLogger()
+} else {
+log, logrFlush := factory.Create(config.FormatOptions{})
+defer logrFlush()
+klog.SetLogger(log)
+}
+ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+defer done()
+pathRecorderMux := mux.NewPathRecorderMux("descheduler")
+if !s.DisableMetrics {
+pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
+}
+healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
+if _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done()); err != nil {
+klog.Fatalf("failed to start secure server: %v", err)
 return
 }
-if !s.DisableMetrics {
+err := Run(ctx, s)
-ctx := context.TODO()
-pathRecorderMux := mux.NewPathRecorderMux("descheduler")
-pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
-if _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done()); err != nil {
-klog.Fatalf("failed to start secure server: %v", err)
-return
-}
-}
-err := Run(s)
 if err != nil {
 klog.ErrorS(err, "descheduler server")
 }
@@ -83,12 +94,10 @@ }
 }
 cmd.SetOut(out)
 flags := cmd.Flags()
-flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
-flags.AddGoFlagSet(flag.CommandLine)
 s.AddFlags(flags)
 return cmd
 }
-func Run(rs *options.DeschedulerServer) error {
+func Run(ctx context.Context, rs *options.DeschedulerServer) error {
-return descheduler.Run(rs)
+return descheduler.Run(ctx, rs)
 }


@@ -17,22 +17,23 @@ limitations under the License.
package main
import (
"fmt"
"k8s.io/component-base/logs"
"os"
"k8s.io/component-base/cli"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
)
func init() {
klog.SetOutput(os.Stdout)
klog.InitFlags(nil)
}
func main() {
out := os.Stdout
cmd := app.NewDeschedulerCommand(out)
cmd.AddCommand(app.NewVersionCommand())
logs.InitLogs() code := cli.Run(cmd)
defer logs.FlushLogs() os.Exit(code)
if err := cmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}


@@ -9,7 +9,7 @@
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Publish a draft release using the tag you just created
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter) 6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
7. Publish release
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
@@ -22,7 +22,7 @@
5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
6. Build and push the container image to the staging registry `VERSION=$VERSION make push-all`
7. Publish a draft release using the tag you just created
8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter) 8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
9. Publish release
10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release


@@ -4,6 +4,7 @@ Starting with descheduler release v0.10.0 container images are available in the
Descheduler Version | Container Image | Architectures |
------------------- |-----------------------------------------------------|-------------------------|
v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |

examples/failed-pods.yaml (new file)

@@ -0,0 +1,14 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "OutOfcpu"
- "CreateContainerConfigError"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600 # 1 hour


@@ -1,4 +1,3 @@
---
apiVersion: "descheduler/v1alpha1" apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy" kind: "DeschedulerPolicy"
strategies: strategies:


@@ -1,4 +1,3 @@
---
apiVersion: "descheduler/v1alpha1" apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy" kind: "DeschedulerPolicy"
strategies: strategies:


@@ -1,4 +1,3 @@
---
apiVersion: "descheduler/v1alpha1" apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy" kind: "DeschedulerPolicy"
strategies: strategies:


@@ -23,3 +23,7 @@ strategies:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: true


@@ -0,0 +1,7 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints

go.mod

@@ -1,19 +1,117 @@
module sigs.k8s.io/descheduler module sigs.k8s.io/descheduler
go 1.16 go 1.17
require ( require (
github.com/client9/misspell v0.3.4 github.com/client9/misspell v0.3.4
github.com/spf13/cobra v1.1.1 github.com/spf13/cobra v1.2.1
github.com/spf13/pflag v1.0.5 github.com/spf13/pflag v1.0.5
k8s.io/api v0.21.0 k8s.io/api v0.23.0
k8s.io/apimachinery v0.21.0 k8s.io/apimachinery v0.23.0
k8s.io/apiserver v0.21.0 k8s.io/apiserver v0.23.0
k8s.io/client-go v0.21.0 k8s.io/client-go v0.23.0
k8s.io/code-generator v0.21.0 k8s.io/code-generator v0.23.0
k8s.io/component-base v0.21.0 k8s.io/component-base v0.23.0
k8s.io/component-helpers v0.21.0 k8s.io/component-helpers v0.23.0
k8s.io/klog/v2 v2.8.0 k8s.io/klog/v2 v2.30.0
k8s.io/kubectl v0.20.5 k8s.io/kubectl v0.20.5
sigs.k8s.io/mdtoc v1.0.1 sigs.k8s.io/mdtoc v1.0.1
) )
require (
cloud.google.com/go v0.81.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.1 // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logr/logr v1.2.0 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/imdario/mergo v0.3.5 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.28.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/v3 v3.5.0 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
go.opentelemetry.io/otel v0.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
google.golang.org/grpc v1.40.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

go.sum (diff too large; not shown)

@@ -18,15 +18,15 @@ E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
create_cluster() { create_cluster() {
echo "#################### Creating instances ##########################" echo "#################### Creating instances ##########################"
gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b gcloud compute instances create descheduler-$master_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
# Keeping the --zone here so as to make sure that e2e's can run locally. # Keeping the --zone here so as to make sure that e2e's can run locally.
echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b gcloud compute instances create descheduler-$node1_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b gcloud compute instances create descheduler-$node2_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-c --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
# Delete the firewall port created for master. # Delete the firewall port created for master.
echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
@@ -44,10 +44,10 @@ generate_kubeadm_instance_files() {
transfer_install_files() { transfer_install_files() {
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
} }
@@ -55,19 +55,19 @@ install_kube() {
# Docker installation. # Docker installation.
gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-c
# kubeadm installation. # kubeadm installation.
# 1. Transfer files to master, nodes. # 1. Transfer files to master, nodes.
transfer_install_files transfer_install_files
# 2. Install kubeadm. # 2. Install kubeadm.
#TODO: Add rm /tmp/kubeadm_install.sh #TODO: Add rm /tmp/kubeadm_install.sh
# Open port for kube API server # Open port for kube API server
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port" gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join') kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join')
# Copy the kubeconfig file onto /tmp for e2e tests. # Copy the kubeconfig file onto /tmp for e2e tests.
gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
@@ -75,16 +75,15 @@ install_kube() {
gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
# Copy kubeadm_join to every node. # Copy kubeadm_join to every node.
#TODO: Put these in a loop, so that extension becomes possible. #TODO: Put these in a loop, so that extension becomes possible.
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-c
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-c
} }


@@ -3,4 +3,16 @@ apiVersion: kind.x-k8s.io/v1alpha4
nodes: nodes:
- role: control-plane - role: control-plane
- role: worker - role: worker
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "topology.kubernetes.io/zone=local-a"
- role: worker - role: worker
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "topology.kubernetes.io/zone=local-b"


@@ -1,3 +1,4 @@
//go:build tools
// +build tools // +build tools
/* /*


@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version)) GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16') ]]; then if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt." echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1 exit 1
fi fi


@@ -28,7 +28,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated output differs:" >&2 echo "Generated output differs:" >&2
echo "${_out}" >&2 echo "${_out}" >&2
echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh (and commit the result)" echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh"
exit 1 exit 1
fi fi
popd > /dev/null 2>&1 popd > /dev/null 2>&1


@@ -28,7 +28,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated deep-copies output differs:" >&2 echo "Generated deep-copies output differs:" >&2
echo "${_out}" >&2 echo "${_out}" >&2
echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh (and commit the result)" echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh"
exit 1 exit 1
fi fi
popd > /dev/null 2>&1 popd > /dev/null 2>&1

hack/verify-defaulters.sh (new executable file)

@@ -0,0 +1,35 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
${OS_OUTPUT_BINPATH}/defaulter-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.defaults
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated defaulters output differs:" >&2
echo "${_out}" >&2
echo "Generated defaulters verify failed. Please run ./hack/update-generated-defaulters.sh"
fi
popd > /dev/null 2>&1
echo "Generated Defaulters verified."


@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version)) GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16') ]]; then if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt." echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1 exit 1
fi fi

kind (binary file; diff not shown)

@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical priorityClassName: system-cluster-critical
containers: containers:
- name: descheduler - name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.21.0 image: k8s.gcr.io/descheduler/descheduler:v0.22.0
volumeMounts: volumeMounts:
- mountPath: /policy-dir - mountPath: /policy-dir
name: policy-volume name: policy-volume
@@ -31,6 +31,14 @@ spec:
requests: requests:
cpu: "500m" cpu: "500m"
memory: "256Mi" memory: "256Mi"
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
securityContext: securityContext:
allowPrivilegeEscalation: false allowPrivilegeEscalation: false
capabilities: capabilities:


@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa serviceAccountName: descheduler-sa
containers: containers:
- name: descheduler - name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.21.0 image: k8s.gcr.io/descheduler/descheduler:v0.22.0
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
command: command:
- "/bin/descheduler" - "/bin/descheduler"
@@ -33,6 +33,14 @@ spec:
ports: ports:
- containerPort: 10258 - containerPort: 10258
protocol: TCP protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
resources: resources:
requests: requests:
cpu: 500m cpu: 500m


@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical priorityClassName: system-cluster-critical
containers: containers:
- name: descheduler - name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.21.0 image: k8s.gcr.io/descheduler/descheduler:v0.22.0
volumeMounts: volumeMounts:
- mountPath: /policy-dir - mountPath: /policy-dir
name: policy-volume name: policy-volume
@@ -29,6 +29,14 @@ spec:
requests: requests:
cpu: "500m" cpu: "500m"
memory: "256Mi" memory: "256Mi"
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
securityContext: securityContext:
allowPrivilegeEscalation: false allowPrivilegeEscalation: false
capabilities: capabilities:


@@ -34,9 +34,9 @@ var (
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted",
Help: "Number of evicted pods, by the result, by the strategy, by the namespace. 'failed' result means a pod could not be evicted", Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
}, []string{"result", "strategy", "namespace"}) }, []string{"result", "strategy", "namespace", "node"})
buildInfo = metrics.NewGauge(
&metrics.GaugeOpts{


@@ -32,6 +32,9 @@ type DeschedulerPolicy struct {
// NodeSelector for a set of nodes to operate over // NodeSelector for a set of nodes to operate over
NodeSelector *string NodeSelector *string
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool
// EvictLocalStoragePods allows pods using local storage to be evicted. // EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool EvictLocalStoragePods *bool
@@ -42,7 +45,10 @@ type DeschedulerPolicy struct {
IgnorePVCPods *bool IgnorePVCPods *bool
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node. // MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int MaxNoOfPodsToEvictPerNode *uint
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint
} }
type StrategyName string type StrategyName string
@@ -75,6 +81,7 @@ type StrategyParameters struct {
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts PodsHavingTooManyRestarts *PodsHavingTooManyRestarts
PodLifeTime *PodLifeTime PodLifeTime *PodLifeTime
RemoveDuplicates *RemoveDuplicates RemoveDuplicates *RemoveDuplicates
FailedPods *FailedPods
IncludeSoftConstraints bool IncludeSoftConstraints bool
Namespaces *Namespaces Namespaces *Namespaces
ThresholdPriority *int32 ThresholdPriority *int32
@@ -105,3 +112,10 @@ type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint MaxPodLifeTimeSeconds *uint
PodStatusPhases []string PodStatusPhases []string
} }
type FailedPods struct {
ExcludeOwnerKinds []string
MinPodLifetimeSeconds *uint
Reasons []string
IncludingInitContainers bool
}
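For orientation, a short sketch of how the new internal API fields compose. It is illustrative only, uses just the fields shown in the hunks above, and the pointer helpers are local to the example:

package example

import "sigs.k8s.io/descheduler/pkg/api"

func boolPtr(b bool) *bool { return &b }
func uintPtr(u uint) *uint { return &u }

// Policy-level knobs added or retyped in this change (other DeschedulerPolicy
// fields such as Strategies and NodeSelector are omitted).
var examplePolicy = api.DeschedulerPolicy{
	EvictFailedBarePods:            boolPtr(true),
	MaxNoOfPodsToEvictPerNode:      uintPtr(10),
	MaxNoOfPodsToEvictPerNamespace: uintPtr(5),
}

// Strategy parameters mirroring examples/failed-pods.yaml above.
var exampleFailedPods = api.FailedPods{
	ExcludeOwnerKinds:       []string{"Job"},
	MinPodLifetimeSeconds:   uintPtr(3600),
	Reasons:                 []string{"OutOfcpu", "CreateContainerConfigError"},
	IncludingInitContainers: true,
}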


@@ -32,6 +32,9 @@ type DeschedulerPolicy struct {
// NodeSelector for a set of nodes to operate over // NodeSelector for a set of nodes to operate over
NodeSelector *string `json:"nodeSelector,omitempty"` NodeSelector *string `json:"nodeSelector,omitempty"`
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`
// EvictLocalStoragePods allows pods using local storage to be evicted. // EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"` EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
@@ -43,6 +46,9 @@ type DeschedulerPolicy struct {
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node. // MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"` MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
} }
type StrategyName string type StrategyName string
@@ -73,6 +79,7 @@ type StrategyParameters struct {
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"` PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"` PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"` RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
FailedPods *FailedPods `json:"failedPods,omitempty"`
IncludeSoftConstraints bool `json:"includeSoftConstraints"` IncludeSoftConstraints bool `json:"includeSoftConstraints"`
Namespaces *Namespaces `json:"namespaces"` Namespaces *Namespaces `json:"namespaces"`
ThresholdPriority *int32 `json:"thresholdPriority"` ThresholdPriority *int32 `json:"thresholdPriority"`
@@ -103,3 +110,10 @@ type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"` MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
PodStatusPhases []string `json:"podStatusPhases,omitempty"` PodStatusPhases []string `json:"podStatusPhases,omitempty"`
} }
type FailedPods struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
Reasons []string `json:"reasons,omitempty"`
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}
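Because the v1alpha1 type carries the JSON tags shown above, the keys in examples/failed-pods.yaml deserialize straight into it. A small illustrative program (the import path follows the pkg/api/v1alpha1 package referenced elsewhere in this change set):

package main

import (
	"encoding/json"
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
	minLifetime := uint(3600)
	fp := v1alpha1.FailedPods{
		ExcludeOwnerKinds:       []string{"Job"},
		MinPodLifetimeSeconds:   &minLifetime,
		Reasons:                 []string{"OutOfcpu", "CreateContainerConfigError"},
		IncludingInitContainers: true,
	}
	// Prints the same keys used in the policy file: excludeOwnerKinds,
	// minPodLifetimeSeconds, reasons, includingInitContainers.
	out, _ := json.MarshalIndent(fp, "", "  ")
	fmt.Println(string(out))
}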


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -56,6 +57,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*FailedPods)(nil), (*api.FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FailedPods_To_api_FailedPods(a.(*FailedPods), b.(*api.FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.FailedPods)(nil), (*FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_FailedPods_To_v1alpha1_FailedPods(a.(*api.FailedPods), b.(*FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope) return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope)
}); err != nil { }); err != nil {
@@ -122,10 +133,24 @@ func RegisterConversions(s *runtime.Scheme) error {
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error { func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies)) out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector)) out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods)) out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods)) out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods)) out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode)) if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil return nil
} }
@@ -137,10 +162,24 @@ func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Descheduler
func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error { func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies)) out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector)) out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods)) out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods)) out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods)) out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode)) if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil return nil
} }
@@ -173,6 +212,32 @@ func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.Des
return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s) return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
} }
func autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_v1alpha1_FailedPods_To_api_FailedPods is an autogenerated conversion function.
func Convert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
return autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in, out, s)
}
func autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_api_FailedPods_To_v1alpha1_FailedPods is an autogenerated conversion function.
func Convert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
return autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in, out, s)
}
func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error { func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
out.Include = *(*[]string)(unsafe.Pointer(&in.Include)) out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude)) out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
@@ -289,6 +354,7 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts)) out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime)) out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates)) out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*api.FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces)) out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority)) out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
@@ -309,6 +375,7 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts)) out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime)) out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates)) out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces)) out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority)) out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -41,6 +42,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(string) *out = new(string)
**out = **in **out = **in
} }
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil { if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool) *out = new(bool)
@@ -61,6 +67,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(int) *out = new(int)
**out = **in **out = **in
} }
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = **in
}
return return
} }
@@ -103,6 +114,37 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) { func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in *out = *in
@@ -294,6 +336,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = new(RemoveDuplicates) *out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil { if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces) *out = new(Namespaces)


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -41,6 +42,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(string) *out = new(string)
**out = **in **out = **in
} }
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil { if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool) *out = new(bool)
@@ -58,7 +64,12 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
} }
if in.MaxNoOfPodsToEvictPerNode != nil { if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int) *out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = **in **out = **in
} }
return return
@@ -103,6 +114,37 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) { func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in *out = *in
@@ -294,6 +336,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = new(RemoveDuplicates) *out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil { if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces) *out = new(Namespaces)


@@ -17,9 +17,10 @@ limitations under the License.
package v1alpha1 package v1alpha1
import ( import (
componentbaseconfig "k8s.io/component-base/config"
"time" "time"
componentbaseconfig "k8s.io/component-base/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -28,7 +29,7 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) { func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in *out = *in
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
out.Logging = in.Logging in.Logging.DeepCopyInto(&out.Logging)
return return
} }


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.


@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2021 The Kubernetes Authors. Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -28,7 +29,7 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) { func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in *out = *in
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
out.Logging = in.Logging in.Logging.DeepCopyInto(&out.Logging)
return return
} }


@@ -19,14 +19,22 @@ package descheduler
import ( import (
"context" "context"
"fmt" "fmt"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes" policy "k8s.io/api/policy/v1beta1"
"k8s.io/klog/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/klog/v2"
corev1informers "k8s.io/client-go/informers/core/v1"
schedulingv1informers "k8s.io/client-go/informers/scheduling/v1"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/metrics" "sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
@@ -34,13 +42,14 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils" eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies" "sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
) )
func Run(rs *options.DeschedulerServer) error { func Run(ctx context.Context, rs *options.DeschedulerServer) error {
metrics.Register() metrics.Register()
ctx := context.Background()
rsclient, err := client.CreateClient(rs.KubeconfigFile) rsclient, err := client.CreateClient(rs.KubeconfigFile)
if err != nil { if err != nil {
return err return err
@@ -60,15 +69,108 @@ func Run(rs *options.DeschedulerServer) error {
return err return err
} }
// tie in root ctx with our wait stopChannel
stopChannel := make(chan struct{}) stopChannel := make(chan struct{})
go func() {
<-ctx.Done()
close(stopChannel)
}()
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel) return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
} }
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
func cachedClient(
realClient clientset.Interface,
podInformer corev1informers.PodInformer,
nodeInformer corev1informers.NodeInformer,
namespaceInformer corev1informers.NamespaceInformer,
priorityClassInformer schedulingv1informers.PriorityClassInformer,
) (clientset.Interface, error) {
fakeClient := fakeclientset.NewSimpleClientset()
// simulate a pod eviction by deleting a pod
fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
eviction, matched := createAct.Object.(*policy.Eviction)
if !matched {
return false, nil, fmt.Errorf("unable to convert action object into *policy.Eviction")
}
if err := fakeClient.Tracker().Delete(action.GetResource(), eviction.GetNamespace(), eviction.GetName()); err != nil {
return false, nil, fmt.Errorf("unable to delete pod %v/%v: %v", eviction.GetNamespace(), eviction.GetName(), err)
}
return true, nil, nil
}
// fallback to the default reactor
return false, nil, nil
})
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
pods, err := podInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list pods: %v", err)
}
for _, item := range pods {
if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy pod: %v", err)
}
}
nodes, err := nodeInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list nodes: %v", err)
}
for _, item := range nodes {
if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
namespaces, err := namespaceInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list namespaces: %v", err)
}
for _, item := range namespaces {
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
priorityClasses, err := priorityClassInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
}
for _, item := range priorityClasses {
if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
}
}
return fakeClient, nil
}
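The heart of cachedClient is the reactor that turns eviction subresource requests into deletes against the fake client's tracker, so later strategies no longer see an already-evicted pod. A self-contained sketch of that trick, assuming client-go's policy/v1beta1 Evict helper on the pods client; the pod and namespace names are illustrative:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	ctx := context.Background()

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	fakeClient := fakeclientset.NewSimpleClientset(pod)

	// Translate eviction requests into plain deletes against the fake tracker.
	fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() != "eviction" {
			return false, nil, nil
		}
		createAct, ok := action.(core.CreateActionImpl)
		if !ok {
			return false, nil, nil
		}
		eviction, ok := createAct.Object.(*policyv1beta1.Eviction)
		if !ok {
			return false, nil, nil
		}
		if err := fakeClient.Tracker().Delete(action.GetResource(), eviction.GetNamespace(), eviction.GetName()); err != nil {
			return false, nil, err
		}
		return true, nil, nil
	})

	// Posting an eviction now removes the pod from the fake cache ...
	_ = fakeClient.CoreV1().Pods("default").Evict(ctx, &policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
	})

	// ... so a later strategy listing pods no longer sees it.
	pods, _ := fakeClient.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
	fmt.Println("pods left:", len(pods.Items)) // prints 0
}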
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error { func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0) sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes() nodeInformer := sharedInformerFactory.Core().V1().Nodes()
podInformer := sharedInformerFactory.Core().V1().Pods()
namespaceInformer := sharedInformerFactory.Core().V1().Namespaces()
priorityClassInformer := sharedInformerFactory.Scheduling().V1().PriorityClasses()
// create the informers
namespaceInformer.Informer()
priorityClassInformer.Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(stopChannel) sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel) sharedInformerFactory.WaitForCacheSync(stopChannel)
@@ -83,18 +185,27 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts, "RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
"PodLifeTime": strategies.PodLifeTime, "PodLifeTime": strategies.PodLifeTime,
"RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint, "RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint,
"RemoveFailedPods": strategies.RemoveFailedPods,
} }
nodeSelector := rs.NodeSelector var nodeSelector string
if deschedulerPolicy.NodeSelector != nil { if deschedulerPolicy.NodeSelector != nil {
nodeSelector = *deschedulerPolicy.NodeSelector nodeSelector = *deschedulerPolicy.NodeSelector
} }
evictLocalStoragePods := rs.EvictLocalStoragePods var evictLocalStoragePods bool
if deschedulerPolicy.EvictLocalStoragePods != nil { if deschedulerPolicy.EvictLocalStoragePods != nil {
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
} }
evictBarePods := false
if deschedulerPolicy.EvictFailedBarePods != nil {
evictBarePods = *deschedulerPolicy.EvictFailedBarePods
if evictBarePods {
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
}
}
evictSystemCriticalPods := false evictSystemCriticalPods := false
if deschedulerPolicy.EvictSystemCriticalPods != nil { if deschedulerPolicy.EvictSystemCriticalPods != nil {
evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
@@ -108,12 +219,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
} }
maxNoOfPodsToEvictPerNode := rs.MaxNoOfPodsToEvictPerNode wait.NonSlidingUntil(func() {
if deschedulerPolicy.MaxNoOfPodsToEvictPerNode != nil {
maxNoOfPodsToEvictPerNode = *deschedulerPolicy.MaxNoOfPodsToEvictPerNode
}
wait.Until(func() {
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector) nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
if err != nil { if err != nil {
klog.V(1).InfoS("Unable to get ready nodes", "err", err) klog.V(1).InfoS("Unable to get ready nodes", "err", err)
@@ -127,21 +233,55 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
return return
} }
var podEvictorClient clientset.Interface
// When dry mode is enabled, collect all the relevant objects (mostly pods) under a fake client
// so that evictions performed by multiple strategies in a row have the cumulative effect
// they would have when evicting pods for real.
if rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(rs.Client, podInformer, nodeInformer, namespaceInformer, priorityClassInformer)
if err != nil {
klog.Error(err)
return
}
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods())
if err != nil {
klog.Errorf("build get pods assigned to node function error: %v", err)
return
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
podEvictorClient = fakeClient
} else {
podEvictorClient = rs.Client
}
klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor( podEvictor := evictions.NewPodEvictor(
rs.Client, podEvictorClient,
evictionPolicyGroupVersion, evictionPolicyGroupVersion,
rs.DryRun, rs.DryRun,
maxNoOfPodsToEvictPerNode, deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes, nodes,
evictLocalStoragePods, evictLocalStoragePods,
evictSystemCriticalPods, evictSystemCriticalPods,
ignorePvcPods, ignorePvcPods,
evictBarePods,
!rs.DisableMetrics,
) )
for name, strategy := range deschedulerPolicy.Strategies { for name, strategy := range deschedulerPolicy.Strategies {
if f, ok := strategyFuncs[name]; ok { if f, ok := strategyFuncs[name]; ok {
if strategy.Enabled { if strategy.Enabled {
f(ctx, rs.Client, strategy, nodes, podEvictor) f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
} }
} else { } else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name) klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
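The descheduling loop also switches from wait.Until to wait.NonSlidingUntil, so the interval is measured from the start of one run to the start of the next instead of from the end of the previous run. A small sketch contrasting the two behaviours (durations and the work function are illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	go func() {
		time.Sleep(2500 * time.Millisecond)
		close(stopCh)
	}()

	start := time.Now()
	// With NonSlidingUntil the 1s period includes the 400ms the body takes,
	// so iterations start at roughly 0s, 1s, 2s. With wait.Until the next
	// iteration would only start 1s after the previous one finished
	// (roughly 0s, 1.4s, 2.8s).
	wait.NonSlidingUntil(func() {
		fmt.Printf("descheduling run at +%v\n", time.Since(start).Round(100*time.Millisecond))
		time.Sleep(400 * time.Millisecond) // stand-in for running the strategies
	}, time.Second, stopCh)
}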


@@ -55,7 +55,7 @@ func TestTaintsUpdated(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err) t.Fatalf("Unable to run descheduler strategies: %v", err)
} }
case <-time.After(300 * time.Millisecond): case <-time.After(1 * time.Second):
// Wait for few cycles and then verify the only pod still exists // Wait for few cycles and then verify the only pod still exists
} }


@@ -45,57 +45,70 @@ const (
) )
// nodePodEvictedCount keeps count of pods evicted on node // nodePodEvictedCount keeps count of pods evicted on node
type nodePodEvictedCount map[*v1.Node]int type nodePodEvictedCount map[*v1.Node]uint
type namespacePodEvictCount map[string]uint
type PodEvictor struct { type PodEvictor struct {
client clientset.Interface client clientset.Interface
nodes []*v1.Node nodes []*v1.Node
policyGroupVersion string policyGroupVersion string
dryRun bool dryRun bool
maxPodsToEvictPerNode int maxPodsToEvictPerNode *uint
nodepodCount nodePodEvictedCount maxPodsToEvictPerNamespace *uint
evictLocalStoragePods bool nodepodCount nodePodEvictedCount
evictSystemCriticalPods bool namespacePodCount namespacePodEvictCount
ignorePvcPods bool evictFailedBarePods bool
evictLocalStoragePods bool
evictSystemCriticalPods bool
ignorePvcPods bool
metricsEnabled bool
} }
func NewPodEvictor( func NewPodEvictor(
client clientset.Interface, client clientset.Interface,
policyGroupVersion string, policyGroupVersion string,
dryRun bool, dryRun bool,
maxPodsToEvictPerNode int, maxPodsToEvictPerNode *uint,
maxPodsToEvictPerNamespace *uint,
nodes []*v1.Node, nodes []*v1.Node,
evictLocalStoragePods bool, evictLocalStoragePods bool,
evictSystemCriticalPods bool, evictSystemCriticalPods bool,
ignorePvcPods bool, ignorePvcPods bool,
evictFailedBarePods bool,
metricsEnabled bool,
) *PodEvictor { ) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount) var nodePodCount = make(nodePodEvictedCount)
var namespacePodCount = make(namespacePodEvictCount)
for _, node := range nodes { for _, node := range nodes {
// Initialize podsEvicted till now with 0. // Initialize podsEvicted till now with 0.
nodePodCount[node] = 0 nodePodCount[node] = 0
} }
return &PodEvictor{ return &PodEvictor{
client: client, client: client,
nodes: nodes, nodes: nodes,
policyGroupVersion: policyGroupVersion, policyGroupVersion: policyGroupVersion,
dryRun: dryRun, dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode, maxPodsToEvictPerNode: maxPodsToEvictPerNode,
nodepodCount: nodePodCount, maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
evictLocalStoragePods: evictLocalStoragePods, nodepodCount: nodePodCount,
evictSystemCriticalPods: evictSystemCriticalPods, namespacePodCount: namespacePodCount,
ignorePvcPods: ignorePvcPods, evictLocalStoragePods: evictLocalStoragePods,
evictSystemCriticalPods: evictSystemCriticalPods,
evictFailedBarePods: evictFailedBarePods,
ignorePvcPods: ignorePvcPods,
metricsEnabled: metricsEnabled,
} }
} }
// NodeEvicted gives a number of pods evicted for node // NodeEvicted gives a number of pods evicted for node
func (pe *PodEvictor) NodeEvicted(node *v1.Node) int { func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
return pe.nodepodCount[node] return pe.nodepodCount[node]
} }
// TotalEvicted gives a number of pods evicted through all nodes // TotalEvicted gives a number of pods evicted through all nodes
func (pe *PodEvictor) TotalEvicted() int { func (pe *PodEvictor) TotalEvicted() uint {
var total int var total uint
for _, count := range pe.nodepodCount { for _, count := range pe.nodepodCount {
total += count total += count
} }
@@ -110,20 +123,37 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
if len(reasons) > 0 { if len(reasons) > 0 {
reason += " (" + strings.Join(reasons, ", ") + ")" reason += " (" + strings.Join(reasons, ", ") + ")"
} }
if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode { if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[node]+1 > *pe.maxPodsToEvictPerNode {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number reached", "strategy": strategy, "namespace": pod.Namespace}).Inc() if pe.metricsEnabled {
return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name) metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
}
return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", *pe.maxPodsToEvictPerNode, node.Name)
} }
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun) if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
}
return false, fmt.Errorf("Maximum number %v of evicted pods per %q namespace reached", *pe.maxPodsToEvictPerNamespace, pod.Namespace)
}
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
if err != nil { if err != nil {
// err is used only for logging purposes // err is used only for logging purposes
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason) klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace}).Inc() if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
}
return false, nil return false, nil
} }
pe.nodepodCount[node]++ pe.nodepodCount[node]++
pe.namespacePodCount[pod.Namespace]++
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
}
if pe.dryRun { if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason) klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
} else { } else {
@@ -133,15 +163,11 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)}) eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"}) r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason)) r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace}).Inc()
} }
return true, nil return true, nil
} }
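EvictPod now enforces two optional caps, maxPodsToEvictPerNode and maxPodsToEvictPerNamespace, both *uint so that nil means unlimited. A stripped-down sketch of that bookkeeping outside the real PodEvictor (the limiter type and its names are hypothetical):

package main

import "fmt"

// limiter mirrors the per-node / per-namespace counting done above,
// with nil limits meaning "no cap". Names here are illustrative only.
type limiter struct {
	maxPerNode      *uint
	maxPerNamespace *uint
	perNode         map[string]uint
	perNamespace    map[string]uint
}

func (l *limiter) allow(node, namespace string) bool {
	if l.maxPerNode != nil && l.perNode[node]+1 > *l.maxPerNode {
		return false
	}
	if l.maxPerNamespace != nil && l.perNamespace[namespace]+1 > *l.maxPerNamespace {
		return false
	}
	l.perNode[node]++
	l.perNamespace[namespace]++
	return true
}

func main() {
	two := uint(2)
	l := &limiter{
		maxPerNode:   &two, // cap of 2 evictions per node, namespaces unlimited
		perNode:      map[string]uint{},
		perNamespace: map[string]uint{},
	}
	for i := 0; i < 3; i++ {
		fmt.Printf("eviction %d on n1 allowed: %v\n", i+1, l.allow("n1", "default"))
	}
	// Prints true, true, false: the third eviction on n1 exceeds the per-node cap.
}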
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) error { func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
if dryRun {
return nil
}
deleteOptions := &metav1.DeleteOptions{} deleteOptions := &metav1.DeleteOptions{}
// GracePeriodSeconds ? // GracePeriodSeconds ?
eviction := &policy.Eviction{ eviction := &policy.Eviction{
@@ -215,6 +241,25 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
} }
ev := &evictable{} ev := &evictable{}
if pe.evictFailedBarePods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
// Enable evictFailedBarePods to evict bare pods in failed phase
if len(ownerRefList) == 0 && pod.Status.Phase != v1.PodFailed {
return fmt.Errorf("pod does not have any ownerRefs and is not in failed phase")
}
return nil
})
} else {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
// Moved from IsEvictable function for backward compatibility
if len(ownerRefList) == 0 {
return fmt.Errorf("pod does not have any ownerRefs")
}
return nil
})
}
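The new constraint means a pod without ownerReferences is only evictable when EvictFailedBarePods is enabled and the pod is in the Failed phase; otherwise bare pods keep being rejected as before. A self-contained sketch of just that rule (checkBarePod is a hypothetical helper, not the descheduler's API):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// checkBarePod mirrors the constraint added above: with evictFailedBarePods,
// a pod without ownerReferences is evictable only when it has failed.
func checkBarePod(pod *v1.Pod, evictFailedBarePods bool) error {
	if len(pod.OwnerReferences) != 0 {
		return nil // owned pods are handled by the other constraints
	}
	if evictFailedBarePods {
		if pod.Status.Phase != v1.PodFailed {
			return fmt.Errorf("pod does not have any ownerRefs and is not in failed phase")
		}
		return nil
	}
	return fmt.Errorf("pod does not have any ownerRefs")
}

func main() {
	failed := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "bare-failed"},
		Status:     v1.PodStatus{Phase: v1.PodFailed},
	}
	running := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "bare-running"},
		Status:     v1.PodStatus{Phase: v1.PodRunning},
	}
	fmt.Println(checkBarePod(failed, true))  // <nil>: evictable
	fmt.Println(checkBarePod(running, true)) // rejected: not in failed phase
	fmt.Println(checkBarePod(failed, false)) // rejected: no ownerRefs at all
}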
if !pe.evictSystemCriticalPods { if !pe.evictSystemCriticalPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error { ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
// Moved from IsEvictable function to allow for disabling // Moved from IsEvictable function to allow for disabling
@@ -278,10 +323,6 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod")) checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
} }
if len(ownerRefList) == 0 {
checkErrs = append(checkErrs, fmt.Errorf("pod does not have any ownerrefs"))
}
if utils.IsMirrorPod(pod) { if utils.IsMirrorPod(pod) {
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod")) checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
} }
@@ -290,6 +331,10 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
checkErrs = append(checkErrs, fmt.Errorf("pod is a static pod")) checkErrs = append(checkErrs, fmt.Errorf("pod is a static pod"))
} }
if utils.IsPodTerminating(pod) {
checkErrs = append(checkErrs, fmt.Errorf("pod is terminating"))
}
for _, c := range ev.constraints { for _, c := range ev.constraints {
if err := c(pod); err != nil { if err := c(pod); err != nil {
checkErrs = append(checkErrs, err) checkErrs = append(checkErrs, err)


@@ -62,7 +62,7 @@ func TestEvictPod(t *testing.T) {
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: test.pods}, nil return true, &v1.PodList{Items: test.pods}, nil
}) })
got := evictPod(ctx, fakeClient, test.pod, "v1", false) got := evictPod(ctx, fakeClient, test.pod, "v1")
if got != test.want { if got != test.want {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got) t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
} }
@@ -83,6 +83,7 @@ func TestIsEvictable(t *testing.T) {
pod *v1.Pod pod *v1.Pod
nodes []*v1.Node nodes []*v1.Node
runBefore func(*v1.Pod, []*v1.Node) runBefore func(*v1.Pod, []*v1.Node)
evictFailedBarePods bool
evictLocalStoragePods bool evictLocalStoragePods bool
evictSystemCriticalPods bool evictSystemCriticalPods bool
priorityThreshold *int32 priorityThreshold *int32
@@ -91,7 +92,27 @@ func TestIsEvictable(t *testing.T) {
} }
testCases := []testCase{ testCases := []testCase{
{ // Normal pod eviction with normal ownerRefs { // Failed pod eviction with no ownerRefs.
pod: test.BuildTestPod("bare_pod_failed", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
pod.Status.Phase = v1.PodFailed
},
evictFailedBarePods: false,
result: false,
}, { // Normal pod eviction with no ownerRefs and evictFailedBarePods enabled
pod: test.BuildTestPod("bare_pod", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
},
evictFailedBarePods: true,
result: false,
}, { // Failed pod eviction with no ownerRefs
pod: test.BuildTestPod("bare_pod_failed_but_can_be_evicted", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
pod.Status.Phase = v1.PodFailed
},
evictFailedBarePods: true,
result: true,
}, { // Normal pod eviction with normal ownerRefs
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil), pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -417,6 +438,7 @@ func TestIsEvictable(t *testing.T) {
podEvictor := &PodEvictor{ podEvictor := &PodEvictor{
evictLocalStoragePods: test.evictLocalStoragePods, evictLocalStoragePods: test.evictLocalStoragePods,
evictSystemCriticalPods: test.evictSystemCriticalPods, evictSystemCriticalPods: test.evictSystemCriticalPods,
evictFailedBarePods: test.evictFailedBarePods,
nodes: nodes, nodes: nodes,
} }


@@ -17,139 +17,174 @@ limitations under the License.
package pod package pod
import ( import (
"context"
"sort" "sort"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes" "k8s.io/apimachinery/pkg/util/sets"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
) )
const (
nodeNameKeyIndex = "spec.nodeName"
)
// FilterFunc is a filter for a pod.
type FilterFunc func(*v1.Pod) bool
// GetPodsAssignedToNodeFunc is a function which accepts a node name and a pod filter function
// as input and returns the pods that are assigned to the node.
type GetPodsAssignedToNodeFunc func(string, FilterFunc) ([]*v1.Pod, error)
// WrapFilterFuncs wraps a set of FilterFunc in one.
func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
return func(pod *v1.Pod) bool {
for _, filter := range filters {
if filter != nil && !filter(pod) {
return false
}
}
return true
}
}
type Options struct { type Options struct {
filter func(pod *v1.Pod) bool filter FilterFunc
includedNamespaces []string includedNamespaces sets.String
excludedNamespaces []string excludedNamespaces sets.String
labelSelector *metav1.LabelSelector labelSelector *metav1.LabelSelector
} }
// NewOptions returns an empty Options.
func NewOptions() *Options {
return &Options{}
}
// WithFilter sets a pod filter. // WithFilter sets a pod filter.
// The filter function should return true if the pod should be returned from ListPodsOnANode // The filter function should return true if the pod should be returned from ListPodsOnANode
func WithFilter(filter func(pod *v1.Pod) bool) func(opts *Options) { func (o *Options) WithFilter(filter FilterFunc) *Options {
return func(opts *Options) { o.filter = filter
opts.filter = filter return o
}
} }
// WithNamespaces sets included namespaces // WithNamespaces sets included namespaces
func WithNamespaces(namespaces []string) func(opts *Options) { func (o *Options) WithNamespaces(namespaces sets.String) *Options {
return func(opts *Options) { o.includedNamespaces = namespaces
opts.includedNamespaces = namespaces return o
}
} }
// WithoutNamespaces sets excluded namespaces // WithoutNamespaces sets excluded namespaces
func WithoutNamespaces(namespaces []string) func(opts *Options) { func (o *Options) WithoutNamespaces(namespaces sets.String) *Options {
return func(opts *Options) { o.excludedNamespaces = namespaces
opts.excludedNamespaces = namespaces return o
}
} }
// WithLabelSelector sets a pod label selector // WithLabelSelector sets a pod label selector
func WithLabelSelector(labelSelector *metav1.LabelSelector) func(opts *Options) { func (o *Options) WithLabelSelector(labelSelector *metav1.LabelSelector) *Options {
return func(opts *Options) { o.labelSelector = labelSelector
opts.labelSelector = labelSelector return o
}
} }
// ListPodsOnANode lists all of the pods on a node // BuildFilterFunc builds a final FilterFunc based on Options.
// It also accepts an optional "filter" function which can be used to further limit the pods that are returned. func (o *Options) BuildFilterFunc() (FilterFunc, error) {
// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can var s labels.Selector
// be used by strategies to extend it if there are further restrictions, such as with NodeAffinity). var err error
func ListPodsOnANode( if o.labelSelector != nil {
ctx context.Context, s, err = metav1.LabelSelectorAsSelector(o.labelSelector)
client clientset.Interface,
node *v1.Node,
opts ...func(opts *Options),
) ([]*v1.Pod, error) {
options := &Options{}
for _, opt := range opts {
opt(options)
}
pods := make([]*v1.Pod, 0)
fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)
labelSelectorString := ""
if options.labelSelector != nil {
selector, err := metav1.LabelSelectorAsSelector(options.labelSelector)
if err != nil { if err != nil {
return []*v1.Pod{}, err return nil, err
} }
labelSelectorString = selector.String()
} }
return func(pod *v1.Pod) bool {
if len(options.includedNamespaces) > 0 { if o.filter != nil && !o.filter(pod) {
fieldSelector, err := fields.ParseSelector(fieldSelectorString) return false
if err != nil {
return []*v1.Pod{}, err
} }
if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) {
return false
}
if len(o.excludedNamespaces) > 0 && o.excludedNamespaces.Has(pod.Namespace) {
return false
}
if s != nil && !s.Matches(labels.Set(pod.GetLabels())) {
return false
}
return true
}, nil
}
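The functional options are replaced by a fluent builder that folds the filter, namespace sets, and label selector into a single FilterFunc. A short usage sketch of the new builder, assuming the package is imported as podutil (the sample pod and selector are illustrative):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func main() {
	filter, err := podutil.NewOptions().
		WithNamespaces(sets.NewString("dev")).
		WithLabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}).
		BuildFilterFunc()
	if err != nil {
		panic(err)
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "web-1",
		Namespace: "dev",
		Labels:    map[string]string{"app": "web"},
	}}
	fmt.Println(filter(pod)) // true: the namespace is included and the selector matches
}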
for _, namespace := range options.includedNamespaces { // BuildGetPodsAssignedToNodeFunc establishes an indexer to map the pods and their assigned nodes.
podList, err := client.CoreV1().Pods(namespace).List(ctx, // It returns a function to help us get all the pods that are assigned to a node based on the indexer.
metav1.ListOptions{ func BuildGetPodsAssignedToNodeFunc(podInformer coreinformers.PodInformer) (GetPodsAssignedToNodeFunc, error) {
FieldSelector: fieldSelector.String(), // Establish an indexer to map the pods and their assigned nodes.
LabelSelector: labelSelectorString, err := podInformer.Informer().AddIndexers(cache.Indexers{
}) nodeNameKeyIndex: func(obj interface{}) ([]string, error) {
if err != nil { pod, ok := obj.(*v1.Pod)
return []*v1.Pod{}, err if !ok {
return []string{}, nil
} }
for i := range podList.Items { if len(pod.Spec.NodeName) == 0 {
if options.filter != nil && !options.filter(&podList.Items[i]) { return []string{}, nil
continue }
} return []string{pod.Spec.NodeName}, nil
pods = append(pods, &podList.Items[i]) },
})
if err != nil {
return nil, err
}
// The indexer helps us get all the pods that are assigned to a node.
podIndexer := podInformer.Informer().GetIndexer()
getPodsAssignedToNode := func(nodeName string, filter FilterFunc) ([]*v1.Pod, error) {
objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName)
if err != nil {
return nil, err
}
pods := make([]*v1.Pod, 0, len(objs))
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
}
if filter(pod) {
pods = append(pods, pod)
} }
} }
return pods, nil return pods, nil
} }
return getPodsAssignedToNode, nil
}
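BuildGetPodsAssignedToNodeFunc relies on a client-go cache indexer keyed by spec.nodeName, so listing a node's pods becomes an index lookup instead of a field-selector query against the API server. A bare sketch of that indexer pattern against a fake client (the index name and pods are illustrative):

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	p1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}, Spec: v1.PodSpec{NodeName: "n1"}}
	p2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2", Namespace: "default"}, Spec: v1.PodSpec{NodeName: "n2"}}
	client := fake.NewSimpleClientset(p1, p2)

	factory := informers.NewSharedInformerFactory(client, 0)
	podInformer := factory.Core().V1().Pods().Informer()

	// Index pods by the node they are assigned to, as BuildGetPodsAssignedToNodeFunc does.
	if err := podInformer.AddIndexers(cache.Indexers{
		"byNode": func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*v1.Pod)
			if !ok || pod.Spec.NodeName == "" {
				return nil, nil
			}
			return []string{pod.Spec.NodeName}, nil
		},
	}); err != nil {
		panic(err)
	}

	factory.Start(ctx.Done())
	factory.WaitForCacheSync(ctx.Done())

	objs, _ := podInformer.GetIndexer().ByIndex("byNode", "n1")
	fmt.Println("pods on n1:", len(objs)) // 1
}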
if len(options.excludedNamespaces) > 0 { // ListPodsOnANode lists all pods on a node.
for _, namespace := range options.excludedNamespaces { // It also accepts a "filter" function which can be used to further limit the pods that are returned.
fieldSelectorString += ",metadata.namespace!=" + namespace // (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
} // be used by strategies to extend it if there are further restrictions, such as with NodeAffinity).
func ListPodsOnANode(
nodeName string,
getPodsAssignedToNode GetPodsAssignedToNodeFunc,
filter FilterFunc,
) ([]*v1.Pod, error) {
// Succeeded and failed pods are not considered because they don't occupy any resource.
f := func(pod *v1.Pod) bool {
return pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed
} }
return ListAllPodsOnANode(nodeName, getPodsAssignedToNode, WrapFilterFuncs(f, filter))
}
fieldSelector, err := fields.ParseSelector(fieldSelectorString) // ListAllPodsOnANode lists all the pods on a node no matter what the phase of the pod is.
func ListAllPodsOnANode(
nodeName string,
getPodsAssignedToNode GetPodsAssignedToNodeFunc,
filter FilterFunc,
) ([]*v1.Pod, error) {
pods, err := getPodsAssignedToNode(nodeName, filter)
if err != nil { if err != nil {
return []*v1.Pod{}, err return []*v1.Pod{}, err
} }
// INFO(jchaloup): field selectors do not work properly with listers
// Once the descheduler switches to pod listers (through informers),
// We need to flip to client-side filtering.
podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
metav1.ListOptions{
FieldSelector: fieldSelector.String(),
LabelSelector: labelSelectorString,
})
if err != nil {
return []*v1.Pod{}, err
}
for i := range podList.Items {
// fake client does not support field selectors
// so let's filter based on the node name as well (quite cheap)
if podList.Items[i].Spec.NodeName != node.Name {
continue
}
if options.filter != nil && !options.filter(&podList.Items[i]) {
continue
}
pods = append(pods, &podList.Items[i])
}
return pods, nil return pods, nil
} }


@@ -18,16 +18,15 @@ package pod
import ( import (
"context" "context"
"fmt"
"reflect" "reflect"
"strings"
"testing" "testing"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
@@ -39,19 +38,17 @@ var (
func TestListPodsOnANode(t *testing.T) { func TestListPodsOnANode(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
pods map[string][]v1.Pod pods []*v1.Pod
node *v1.Node node *v1.Node
labelSelector *metav1.LabelSelector labelSelector *metav1.LabelSelector
expectedPodCount int expectedPodCount int
}{ }{
{ {
name: "test listing pods on a node", name: "test listing pods on a node",
pods: map[string][]v1.Pod{ pods: []*v1.Pod{
"n1": { test.BuildTestPod("pod1", 100, 0, "n1", nil),
*test.BuildTestPod("pod1", 100, 0, "n1", nil), test.BuildTestPod("pod2", 100, 0, "n1", nil),
*test.BuildTestPod("pod2", 100, 0, "n1", nil), test.BuildTestPod("pod3", 100, 0, "n2", nil),
},
"n2": {*test.BuildTestPod("pod3", 100, 0, "n2", nil)},
}, },
node: test.BuildTestNode("n1", 2000, 3000, 10, nil), node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
labelSelector: nil, labelSelector: nil,
@@ -59,17 +56,15 @@ func TestListPodsOnANode(t *testing.T) {
}, },
{ {
name: "test listing pods with label selector", name: "test listing pods with label selector",
pods: map[string][]v1.Pod{ pods: []*v1.Pod{
"n1": { test.BuildTestPod("pod1", 100, 0, "n1", nil),
*test.BuildTestPod("pod1", 100, 0, "n1", nil), test.BuildTestPod("pod2", 100, 0, "n1", func(pod *v1.Pod) {
*test.BuildTestPod("pod2", 100, 0, "n1", func(pod *v1.Pod) { pod.Labels = map[string]string{"foo": "bar"}
pod.Labels = map[string]string{"foo": "bar"} }),
}), test.BuildTestPod("pod3", 100, 0, "n1", func(pod *v1.Pod) {
*test.BuildTestPod("pod3", 100, 0, "n1", func(pod *v1.Pod) { pod.Labels = map[string]string{"foo": "bar1"}
pod.Labels = map[string]string{"foo": "bar1"} }),
}), test.BuildTestPod("pod4", 100, 0, "n2", nil),
},
"n2": {*test.BuildTestPod("pod4", 100, 0, "n2", nil)},
}, },
node: test.BuildTestNode("n1", 2000, 3000, 10, nil), node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
labelSelector: &metav1.LabelSelector{ labelSelector: &metav1.LabelSelector{
@@ -85,21 +80,38 @@ func TestListPodsOnANode(t *testing.T) {
}, },
} }
for _, testCase := range testCases { for _, testCase := range testCases {
fakeClient := &fake.Clientset{} t.Run(testCase.name, func(t *testing.T) {
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { ctx, cancel := context.WithCancel(context.Background())
list := action.(core.ListAction) defer cancel()
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, "n1") { var objs []runtime.Object
return true, &v1.PodList{Items: testCase.pods["n1"]}, nil objs = append(objs, testCase.node)
} else if strings.Contains(fieldString, "n2") { for _, pod := range testCase.pods {
return true, &v1.PodList{Items: testCase.pods["n2"]}, nil objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
filter, err := NewOptions().WithLabelSelector(testCase.labelSelector).BuildFilterFunc()
if err != nil {
t.Errorf("Build filter function error: %v", err)
}
pods, _ := ListPodsOnANode(testCase.node.Name, getPodsAssignedToNode, filter)
if len(pods) != testCase.expectedPodCount {
t.Errorf("Expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
} }
return true, nil, fmt.Errorf("Failed to list: %v", list)
}) })
pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node, WithLabelSelector(testCase.labelSelector))
if len(pods) != testCase.expectedPodCount {
t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
}
} }
} }


@@ -66,6 +66,7 @@ func RemoveDuplicatePods(
strategy api.DeschedulerStrategy, strategy api.DeschedulerStrategy,
nodes []*v1.Node, nodes []*v1.Node,
podEvictor *evictions.PodEvictor, podEvictor *evictions.PodEvictor,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) { ) {
if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil { if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters") klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
@@ -77,10 +78,10 @@ func RemoveDuplicatePods(
return return
} }
var includedNamespaces, excludedNamespaces []string var includedNamespaces, excludedNamespaces sets.String
if strategy.Params != nil && strategy.Params.Namespaces != nil { if strategy.Params != nil && strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = strategy.Params.Namespaces.Exclude excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
} }
nodeFit := false nodeFit := false
@@ -95,15 +96,19 @@ func RemoveDuplicatePods(
nodeCount := 0 nodeCount := 0
nodeMap := make(map[string]*v1.Node) nodeMap := make(map[string]*v1.Node)
podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes { for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node)) klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(ctx, pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
client,
node,
podutil.WithFilter(evictable.IsEvictable),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
)
if err != nil { if err != nil {
klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node)) klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
continue continue


@@ -25,10 +25,12 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
@@ -43,7 +45,6 @@ func buildTestPodWithImage(podName, node, image string) *v1.Pod {
} }
func TestFindDuplicatePods(t *testing.T) { func TestFindDuplicatePods(t *testing.T) {
ctx := context.Background()
// first setup pods // first setup pods
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil) node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil) node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
@@ -173,104 +174,91 @@ func TestFindDuplicatePods(t *testing.T) {
testCases := []struct { testCases := []struct {
description string description string
maxPodsToEvictPerNode int pods []*v1.Pod
pods []v1.Pod
nodes []*v1.Node nodes []*v1.Node
expectedEvictedPodCount int expectedEvictedPodCount uint
strategy api.DeschedulerStrategy strategy api.DeschedulerStrategy
}{ }{
{ {
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.", description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p1, p2, p3},
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
strategy: api.DeschedulerStrategy{}, strategy: api.DeschedulerStrategy{},
}, },
{ {
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.", description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p1, p2, p3},
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}}, strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
}, },
{ {
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.", description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p8, p9, p10},
pods: []v1.Pod{*p8, *p9, *p10},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
strategy: api.DeschedulerStrategy{}, strategy: api.DeschedulerStrategy{},
}, },
{ {
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.", description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p1, p2, p3, p8, p9, p10},
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 2, expectedEvictedPodCount: 2,
strategy: api.DeschedulerStrategy{}, strategy: api.DeschedulerStrategy{},
}, },
{ {
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.", description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
maxPodsToEvictPerNode: 2, pods: []*v1.Pod{p4, p5, p6, p7},
pods: []v1.Pod{*p4, *p5, *p6, *p7},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{}, strategy: api.DeschedulerStrategy{},
}, },
{ {
description: "Test all Pods: 4 should be evicted.", description: "Test all Pods: 4 should be evicted.",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p1, p2, p3, p4, p5, p6, p7, p8, p9, p10},
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 2, expectedEvictedPodCount: 2,
strategy: api.DeschedulerStrategy{}, strategy: api.DeschedulerStrategy{},
}, },
{ {
description: "Pods with the same owner but different images should not be evicted", description: "Pods with the same owner but different images should not be evicted",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p11, p12},
pods: []v1.Pod{*p11, *p12},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{}, strategy: api.DeschedulerStrategy{},
}, },
{ {
description: "Pods with multiple containers should not match themselves", description: "Pods with multiple containers should not match themselves",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p13},
pods: []v1.Pod{*p13},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{}, strategy: api.DeschedulerStrategy{},
}, },
{ {
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction", description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p11, p13},
pods: []v1.Pod{*p11, *p13},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{}, strategy: api.DeschedulerStrategy{},
}, },
{ {
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.", description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p1, p2, p3},
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node3}, nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}}, strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
}, },
{ {
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.", description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p15, p16, p17},
pods: []v1.Pod{*p15, *p16, *p17},
nodes: []*v1.Node{node1, node4}, nodes: []*v1.Node{node1, node4},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}}, strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
}, },
{ {
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.", description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p1, p2, p3},
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node5}, nodes: []*v1.Node{node1, node5},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}}, strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
@@ -279,22 +267,44 @@ func TestFindDuplicatePods(t *testing.T) {
for _, testCase := range testCases { for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) { t.Run(testCase.description, func(t *testing.T) {
fakeClient := &fake.Clientset{} ctx, cancel := context.WithCancel(context.Background())
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { defer cancel()
return true, &v1.PodList{Items: testCase.pods}, nil
}) var objs []runtime.Object
for _, node := range testCase.nodes {
objs = append(objs, node)
}
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor( podEvictor := evictions.NewPodEvictor(
fakeClient, fakeClient,
"v1", "v1",
false, false,
testCase.maxPodsToEvictPerNode, nil,
nil,
testCase.nodes, testCase.nodes,
false, false,
false, false,
false, false,
false,
false,
) )
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor) RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted() podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount { if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted) t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -305,8 +315,6 @@ func TestFindDuplicatePods(t *testing.T) {
} }
func TestRemoveDuplicatesUniformly(t *testing.T) { func TestRemoveDuplicatesUniformly(t *testing.T) {
ctx := context.Background()
setRSOwnerRef2 := func(pod *v1.Pod) { setRSOwnerRef2 := func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"}, {Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"},
@@ -432,25 +440,24 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
testCases := []struct { testCases := []struct {
description string description string
maxPodsToEvictPerNode int pods []*v1.Pod
pods []v1.Pod
nodes []*v1.Node nodes []*v1.Node
expectedEvictedPodCount int expectedEvictedPodCount uint
strategy api.DeschedulerStrategy strategy api.DeschedulerStrategy
}{ }{
{ {
description: "Evict pods uniformly", description: "Evict pods uniformly",
pods: []v1.Pod{ pods: []*v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions // (5,3,1) -> (3,3,3) -> 2 evictions
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef), test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef), test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef), test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef), test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
}, },
expectedEvictedPodCount: 2, expectedEvictedPodCount: 2,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -462,17 +469,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods uniformly with one node left out", description: "Evict pods uniformly with one node left out",
pods: []v1.Pod{ pods: []*v1.Pod{
// (5,3,1) -> (4,4,1) -> 1 eviction // (5,3,1) -> (4,4,1) -> 1 eviction
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef), test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef), test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef), test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef), test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
}, },
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -483,17 +490,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods uniformly with two replica sets", description: "Evict pods uniformly with two replica sets",
pods: []v1.Pod{ pods: []*v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions // (5,3,1) -> (3,3,3) -> 2 evictions
*test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef), test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef), test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef), test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef), test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef), test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef), test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
*test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef), test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
*test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef), test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
*test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef), test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
}, },
expectedEvictedPodCount: 4, expectedEvictedPodCount: 4,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -505,27 +512,27 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods uniformly with two owner references", description: "Evict pods uniformly with two owner references",
pods: []v1.Pod{ pods: []*v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions // (5,3,1) -> (3,3,3) -> 2 evictions
*test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef), test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef), test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef), test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef), test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
// (1,3,5) -> (3,3,3) -> 2 evictions // (1,3,5) -> (3,3,3) -> 2 evictions
*test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2), test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
*test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2), test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
*test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2), test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
*test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2), test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
*test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2), test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2), test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2), test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2), test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2), test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
}, },
expectedEvictedPodCount: 4, expectedEvictedPodCount: 4,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -537,10 +544,10 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods with number of pods less than nodes", description: "Evict pods with number of pods less than nodes",
pods: []v1.Pod{ pods: []*v1.Pod{
// (2,0,0) -> (1,1,0) -> 1 eviction // (2,0,0) -> (1,1,0) -> 1 eviction
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
}, },
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -552,14 +559,14 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods with number of pods less than nodes, but ignore different pods with the same ownerref", description: "Evict pods with number of pods less than nodes, but ignore different pods with the same ownerref",
pods: []v1.Pod{ pods: []*v1.Pod{
// (1, 0, 0) for "bar","baz" images -> no eviction, even with a matching ownerKey // (1, 0, 0) for "bar","baz" images -> no eviction, even with a matching ownerKey
// (2, 0, 0) for "foo" image -> (1,1,0) - 1 eviction // (2, 0, 0) for "foo" image -> (1,1,0) - 1 eviction
// In this case the only "real" duplicates are p1 and p4, so one of those should be evicted // In this case the only "real" duplicates are p1 and p4, so one of those should be evicted
*buildTestPodWithImage("p1", "n1", "foo"), buildTestPodWithImage("p1", "n1", "foo"),
*buildTestPodWithImage("p2", "n1", "bar"), buildTestPodWithImage("p2", "n1", "bar"),
*buildTestPodWithImage("p3", "n1", "baz"), buildTestPodWithImage("p3", "n1", "baz"),
*buildTestPodWithImage("p4", "n1", "foo"), buildTestPodWithImage("p4", "n1", "foo"),
}, },
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -571,9 +578,9 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods with a single pod with three nodes", description: "Evict pods with a single pod with three nodes",
pods: []v1.Pod{ pods: []*v1.Pod{
// (2,0,0) -> (1,1,0) -> 1 eviction // (2,0,0) -> (1,1,0) -> 1 eviction
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef), test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
}, },
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -585,17 +592,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods uniformly respecting taints", description: "Evict pods uniformly respecting taints",
pods: []v1.Pod{ pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions // (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
*test.BuildTestPod("p1", 100, 0, "worker1", setTolerationsK1), test.BuildTestPod("p1", 100, 0, "worker1", setTolerationsK1),
*test.BuildTestPod("p2", 100, 0, "worker1", setTolerationsK2), test.BuildTestPod("p2", 100, 0, "worker1", setTolerationsK2),
*test.BuildTestPod("p3", 100, 0, "worker1", setTolerationsK1), test.BuildTestPod("p3", 100, 0, "worker1", setTolerationsK1),
*test.BuildTestPod("p4", 100, 0, "worker1", setTolerationsK2), test.BuildTestPod("p4", 100, 0, "worker1", setTolerationsK2),
*test.BuildTestPod("p5", 100, 0, "worker1", setTolerationsK1), test.BuildTestPod("p5", 100, 0, "worker1", setTolerationsK1),
*test.BuildTestPod("p6", 100, 0, "worker2", setTolerationsK2), test.BuildTestPod("p6", 100, 0, "worker2", setTolerationsK2),
*test.BuildTestPod("p7", 100, 0, "worker2", setTolerationsK1), test.BuildTestPod("p7", 100, 0, "worker2", setTolerationsK1),
*test.BuildTestPod("p8", 100, 0, "worker2", setTolerationsK2), test.BuildTestPod("p8", 100, 0, "worker2", setTolerationsK2),
*test.BuildTestPod("p9", 100, 0, "worker3", setTolerationsK1), test.BuildTestPod("p9", 100, 0, "worker3", setTolerationsK1),
}, },
expectedEvictedPodCount: 2, expectedEvictedPodCount: 2,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -610,17 +617,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods uniformly respecting RequiredDuringSchedulingIgnoredDuringExecution node affinity", description: "Evict pods uniformly respecting RequiredDuringSchedulingIgnoredDuringExecution node affinity",
pods: []v1.Pod{ pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions // (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
*test.BuildTestPod("p1", 100, 0, "worker1", setNotMasterNodeSelectorK1), test.BuildTestPod("p1", 100, 0, "worker1", setNotMasterNodeSelectorK1),
*test.BuildTestPod("p2", 100, 0, "worker1", setNotMasterNodeSelectorK2), test.BuildTestPod("p2", 100, 0, "worker1", setNotMasterNodeSelectorK2),
*test.BuildTestPod("p3", 100, 0, "worker1", setNotMasterNodeSelectorK1), test.BuildTestPod("p3", 100, 0, "worker1", setNotMasterNodeSelectorK1),
*test.BuildTestPod("p4", 100, 0, "worker1", setNotMasterNodeSelectorK2), test.BuildTestPod("p4", 100, 0, "worker1", setNotMasterNodeSelectorK2),
*test.BuildTestPod("p5", 100, 0, "worker1", setNotMasterNodeSelectorK1), test.BuildTestPod("p5", 100, 0, "worker1", setNotMasterNodeSelectorK1),
*test.BuildTestPod("p6", 100, 0, "worker2", setNotMasterNodeSelectorK2), test.BuildTestPod("p6", 100, 0, "worker2", setNotMasterNodeSelectorK2),
*test.BuildTestPod("p7", 100, 0, "worker2", setNotMasterNodeSelectorK1), test.BuildTestPod("p7", 100, 0, "worker2", setNotMasterNodeSelectorK1),
*test.BuildTestPod("p8", 100, 0, "worker2", setNotMasterNodeSelectorK2), test.BuildTestPod("p8", 100, 0, "worker2", setNotMasterNodeSelectorK2),
*test.BuildTestPod("p9", 100, 0, "worker3", setNotMasterNodeSelectorK1), test.BuildTestPod("p9", 100, 0, "worker3", setNotMasterNodeSelectorK1),
}, },
expectedEvictedPodCount: 2, expectedEvictedPodCount: 2,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -635,17 +642,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods uniformly respecting node selector", description: "Evict pods uniformly respecting node selector",
pods: []v1.Pod{ pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions // (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
*test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1), test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2), test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
*test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1), test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2), test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
*test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1), test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2), test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
*test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1), test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
*test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2), test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
*test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1), test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
}, },
expectedEvictedPodCount: 2, expectedEvictedPodCount: 2,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -660,17 +667,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}, },
{ {
description: "Evict pods uniformly respecting node selector with zero target nodes", description: "Evict pods uniformly respecting node selector with zero target nodes",
pods: []v1.Pod{ pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions // (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
*test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1), test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2), test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
*test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1), test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2), test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
*test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1), test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2), test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
*test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1), test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
*test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2), test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
*test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1), test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
}, },
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -687,22 +694,44 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
for _, testCase := range testCases { for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) { t.Run(testCase.description, func(t *testing.T) {
fakeClient := &fake.Clientset{} ctx, cancel := context.WithCancel(context.Background())
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { defer cancel()
return true, &v1.PodList{Items: testCase.pods}, nil
}) var objs []runtime.Object
for _, node := range testCase.nodes {
objs = append(objs, node)
}
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor( podEvictor := evictions.NewPodEvictor(
fakeClient, fakeClient,
policyv1.SchemeGroupVersion.String(), policyv1.SchemeGroupVersion.String(),
false, false,
testCase.maxPodsToEvictPerNode, nil,
nil,
testCase.nodes, testCase.nodes,
false, false,
false, false,
false, false,
false,
false,
) )
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor) RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted() podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount { if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted) t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)


@@ -0,0 +1,179 @@
package strategies
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
)
// validatedFailedPodsStrategyParams contains validated strategy parameters
type validatedFailedPodsStrategyParams struct {
validation.ValidatedStrategyParams
includingInitContainers bool
reasons sets.String
excludeOwnerKinds sets.String
minPodLifetimeSeconds *uint
}
// RemoveFailedPods removes Pods that are in failed status phase.
func RemoveFailedPods(
ctx context.Context,
client clientset.Interface,
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
strategyParams, err := validateAndParseRemoveFailedPodsParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Invalid RemoveFailedPods parameters")
return
}
evictable := podEvictor.Evictable(
evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
evictions.WithNodeFit(strategyParams.NodeFit),
evictions.WithLabelSelector(strategyParams.LabelSelector),
)
var labelSelector *metav1.LabelSelector
if strategy.Params != nil {
labelSelector = strategy.Params.LabelSelector
}
podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithNamespaces(strategyParams.IncludedNamespaces).
WithoutNamespaces(strategyParams.ExcludedNamespaces).
WithLabelSelector(labelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
// Only list failed pods
phaseFilter := func(pod *v1.Pod) bool { return pod.Status.Phase == v1.PodFailed }
podFilter = podutil.WrapFilterFuncs(phaseFilter, podFilter)
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
if err != nil {
klog.ErrorS(err, "Error listing a nodes failed pods", "node", klog.KObj(node))
continue
}
for i, pod := range pods {
if err = validateFailedPodShouldEvict(pod, *strategyParams); err != nil {
klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
continue
}
if _, err = podEvictor.EvictPod(ctx, pods[i], node, "FailedPod"); err != nil {
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
break
}
}
}
}
func validateAndParseRemoveFailedPodsParams(
ctx context.Context,
client clientset.Interface,
params *api.StrategyParameters,
) (*validatedFailedPodsStrategyParams, error) {
if params == nil {
return &validatedFailedPodsStrategyParams{
ValidatedStrategyParams: validation.DefaultValidatedStrategyParams(),
}, nil
}
strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, params)
if err != nil {
return nil, err
}
var reasons, excludeOwnerKinds sets.String
var includingInitContainers bool
var minPodLifetimeSeconds *uint
if params.FailedPods != nil {
reasons = sets.NewString(params.FailedPods.Reasons...)
includingInitContainers = params.FailedPods.IncludingInitContainers
excludeOwnerKinds = sets.NewString(params.FailedPods.ExcludeOwnerKinds...)
minPodLifetimeSeconds = params.FailedPods.MinPodLifetimeSeconds
}
return &validatedFailedPodsStrategyParams{
ValidatedStrategyParams: *strategyParams,
includingInitContainers: includingInitContainers,
reasons: reasons,
excludeOwnerKinds: excludeOwnerKinds,
minPodLifetimeSeconds: minPodLifetimeSeconds,
}, nil
}
// validateFailedPodShouldEvict looks at strategy params settings to see if the Pod
// should be evicted given the params in the PodFailed policy.
func validateFailedPodShouldEvict(pod *v1.Pod, strategyParams validatedFailedPodsStrategyParams) error {
var errs []error
if strategyParams.minPodLifetimeSeconds != nil {
podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
if podAgeSeconds < *strategyParams.minPodLifetimeSeconds {
errs = append(errs, fmt.Errorf("pod does not exceed the min age seconds of %d", *strategyParams.minPodLifetimeSeconds))
}
}
if len(strategyParams.excludeOwnerKinds) > 0 {
ownerRefList := podutil.OwnerRef(pod)
for _, owner := range ownerRefList {
if strategyParams.excludeOwnerKinds.Has(owner.Kind) {
errs = append(errs, fmt.Errorf("pod's owner kind of %s is excluded", owner.Kind))
}
}
}
if len(strategyParams.reasons) > 0 {
reasons := getFailedContainerStatusReasons(pod.Status.ContainerStatuses)
if pod.Status.Phase == v1.PodFailed && pod.Status.Reason != "" {
reasons = append(reasons, pod.Status.Reason)
}
if strategyParams.includingInitContainers {
reasons = append(reasons, getFailedContainerStatusReasons(pod.Status.InitContainerStatuses)...)
}
if !strategyParams.reasons.HasAny(reasons...) {
errs = append(errs, fmt.Errorf("pod does not match any of the reasons"))
}
}
return utilerrors.NewAggregate(errs)
}
func getFailedContainerStatusReasons(containerStatuses []v1.ContainerStatus) []string {
reasons := make([]string, 0)
for _, containerStatus := range containerStatuses {
if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason != "" {
reasons = append(reasons, containerStatus.State.Waiting.Reason)
}
if containerStatus.State.Terminated != nil && containerStatus.State.Terminated.Reason != "" {
reasons = append(reasons, containerStatus.State.Terminated.Reason)
}
}
return reasons
}
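The reason matching in validateFailedPodShouldEvict above collects waiting/terminated reasons from the container statuses (and, if enabled, init container statuses) plus the top-level pod status reason, then checks them against the configured set. A small sketch of that check in isolation, assuming the illustrative helper matchesAnyReason (not repo code) and the same sets.String utilities used in the file above:
// Sketch only: reason matching in isolation. matchesAnyReason and the sample
// statuses are illustrative; only the sets helpers come from the listing above.
package main
import (
	"fmt"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)
// matchesAnyReason reports whether any waiting/terminated container reason is in the allowed set.
func matchesAnyReason(allowed sets.String, statuses []v1.ContainerStatus) bool {
	var reasons []string
	for _, cs := range statuses {
		if cs.State.Waiting != nil && cs.State.Waiting.Reason != "" {
			reasons = append(reasons, cs.State.Waiting.Reason)
		}
		if cs.State.Terminated != nil && cs.State.Terminated.Reason != "" {
			reasons = append(reasons, cs.State.Terminated.Reason)
		}
	}
	return allowed.HasAny(reasons...)
}
func main() {
	statuses := []v1.ContainerStatus{
		{State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"}}},
	}
	fmt.Println(matchesAnyReason(sets.NewString("CreateContainerConfigError", "NodeAffinity"), statuses)) // true
	fmt.Println(matchesAnyReason(sets.NewString("Shutdown"), statuses))                                   // false
}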


@@ -0,0 +1,343 @@
package strategies
import (
"context"
"testing"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
var (
OneHourInSeconds uint = 3600
)
func TestRemoveFailedPods(t *testing.T) {
createStrategy := func(enabled, includingInitContainers bool, reasons, excludeKinds []string, minAgeSeconds *uint, nodeFit bool) api.DeschedulerStrategy {
return api.DeschedulerStrategy{
Enabled: enabled,
Params: &api.StrategyParameters{
FailedPods: &api.FailedPods{
Reasons: reasons,
IncludingInitContainers: includingInitContainers,
ExcludeOwnerKinds: excludeKinds,
MinPodLifetimeSeconds: minAgeSeconds,
},
NodeFit: nodeFit,
},
}
}
tests := []struct {
description string
nodes []*v1.Node
strategy api.DeschedulerStrategy
expectedEvictedPodCount uint
pods []*v1.Pod
}{
{
description: "default empty strategy, 0 failures, 0 evictions",
strategy: api.DeschedulerStrategy{},
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{}, // no pods come back with field selector phase=Failed
},
{
description: "0 failures, 0 evictions",
strategy: createStrategy(true, false, nil, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{}, // no pods come back with field selector phase=Failed
},
{
description: "1 container terminated with reason NodeAffinity, 1 eviction",
strategy: createStrategy(true, false, nil, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}), nil),
},
},
{
description: "1 init container terminated with reason NodeAffinity, 1 eviction",
strategy: createStrategy(true, true, nil, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}, nil), nil),
},
},
{
description: "1 init container waiting with reason CreateContainerConfigError, 1 eviction",
strategy: createStrategy(true, true, nil, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerConfigError"},
}, nil), nil),
},
},
{
description: "2 init container waiting with reason CreateContainerConfigError, 2 nodes, 2 evictions",
strategy: createStrategy(true, true, nil, nil, nil, false),
nodes: []*v1.Node{
test.BuildTestNode("node1", 2000, 3000, 10, nil),
test.BuildTestNode("node2", 2000, 3000, 10, nil),
},
expectedEvictedPodCount: 2,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
}, nil), nil),
buildTestPod("p2", "node2", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
}, nil), nil),
},
},
{
description: "include reason=CreateContainerConfigError, 1 container terminated with reason CreateContainerConfigError, 1 eviction",
strategy: createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
}), nil),
},
},
{
description: "include reason=CreateContainerConfigError+NodeAffinity, 1 container terminated with reason CreateContainerConfigError, 1 eviction",
strategy: createStrategy(true, false, []string{"CreateContainerConfigError", "NodeAffinity"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
}), nil),
},
},
{
description: "include reason=CreateContainerConfigError, 1 container terminated with reason NodeAffinity, 0 eviction",
strategy: createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}), nil),
},
},
{
description: "include init container=false, 1 init container waiting with reason CreateContainerConfigError, 0 eviction",
strategy: createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerConfigError"},
}, nil), nil),
},
},
{
description: "lifetime 1 hour, 1 container terminated with reason NodeAffinity, 0 eviction",
strategy: createStrategy(true, false, nil, nil, &OneHourInSeconds, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}), nil),
},
},
{
description: "nodeFit=true, 1 unschedulable node, 1 container terminated with reason NodeAffinity, 0 eviction",
strategy: createStrategy(true, false, nil, nil, nil, true),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Unschedulable = true
})},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}), nil),
},
},
{
description: "excluded owner kind=ReplicaSet, 1 init container terminated with owner kind=ReplicaSet, 0 eviction",
strategy: createStrategy(true, true, nil, []string{"ReplicaSet"}, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}, nil), nil),
},
},
{
description: "excluded owner kind=DaemonSet, 1 init container terminated with owner kind=ReplicaSet, 1 eviction",
strategy: createStrategy(true, true, nil, []string{"DaemonSet"}, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}, nil), nil),
},
},
{
description: "excluded owner kind=DaemonSet, 1 init container terminated with owner kind=ReplicaSet, 1 pod in termination; nothing should be moved",
strategy: createStrategy(true, true, nil, []string{"DaemonSet"}, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}, nil), &metav1.Time{}),
},
},
{
description: "1 container terminated with reason ShutDown, 0 evictions",
strategy: createStrategy(true, false, nil, nil, nil, true),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("Shutdown", v1.PodFailed, nil, nil), nil),
},
},
{
description: "include reason=Shutdown, 2 containers terminated with reason ShutDown, 2 evictions",
strategy: createStrategy(true, false, []string{"Shutdown"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 2,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("Shutdown", v1.PodFailed, nil, nil), nil),
buildTestPod("p2", "node1", newPodStatus("Shutdown", v1.PodFailed, nil, nil), nil),
},
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
tc.nodes,
false,
false,
false,
false,
false,
)
RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
})
}
}
func TestValidRemoveFailedPodsParams(t *testing.T) {
ctx := context.Background()
fakeClient := &fake.Clientset{}
testCases := []struct {
name string
params *api.StrategyParameters
}{
{name: "validate nil params", params: nil},
{name: "validate empty params", params: &api.StrategyParameters{}},
{name: "validate reasons params", params: &api.StrategyParameters{FailedPods: &api.FailedPods{
Reasons: []string{"CreateContainerConfigError"},
}}},
{name: "validate includingInitContainers params", params: &api.StrategyParameters{FailedPods: &api.FailedPods{
IncludingInitContainers: true,
}}},
{name: "validate excludeOwnerKinds params", params: &api.StrategyParameters{FailedPods: &api.FailedPods{
ExcludeOwnerKinds: []string{"Job"},
}}},
{name: "validate excludeOwnerKinds params", params: &api.StrategyParameters{FailedPods: &api.FailedPods{
MinPodLifetimeSeconds: &OneHourInSeconds,
}}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
params, err := validateAndParseRemoveFailedPodsParams(ctx, fakeClient, tc.params)
if err != nil {
t.Errorf("strategy params should be valid but got err: %v", err.Error())
}
if params == nil {
t.Errorf("strategy params should return a ValidatedFailedPodsStrategyParams but got nil")
}
})
}
}
func newPodStatus(reason string, phase v1.PodPhase, initContainerState, containerState *v1.ContainerState) v1.PodStatus {
ps := v1.PodStatus{
Reason: reason,
Phase: phase,
}
if initContainerState != nil {
ps.InitContainerStatuses = []v1.ContainerStatus{{State: *initContainerState}}
ps.Phase = v1.PodFailed
}
if containerState != nil {
ps.ContainerStatuses = []v1.ContainerStatus{{State: *containerState}}
ps.Phase = v1.PodFailed
}
return ps
}
func buildTestPod(podName, nodeName string, podStatus v1.PodStatus, deletionTimestamp *metav1.Time) *v1.Pod {
pod := test.BuildTestPod(podName, 1, 1, nodeName, func(p *v1.Pod) {
p.Status = podStatus
})
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
pod.ObjectMeta.SetCreationTimestamp(metav1.Now())
pod.DeletionTimestamp = deletionTimestamp
return pod
}


@@ -21,6 +21,7 @@ import (
"fmt" "fmt"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2" "k8s.io/klog/v2"
@@ -47,7 +48,7 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
} }
// RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity // RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil { if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters") klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
return return
@@ -58,10 +59,10 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
return return
} }
var includedNamespaces, excludedNamespaces []string var includedNamespaces, excludedNamespaces sets.String
if strategy.Params.Namespaces != nil { if strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = strategy.Params.Namespaces.Exclude excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
} }
nodeFit := false nodeFit := false
@@ -71,6 +72,16 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit)) evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
podFilter, err := podutil.NewOptions().
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(strategy.Params.LabelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, nodeAffinity := range strategy.Params.NodeAffinityType { for _, nodeAffinity := range strategy.Params.NodeAffinityType {
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity) klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
@@ -80,17 +91,13 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
klog.V(1).InfoS("Processing node", "node", klog.KObj(node)) klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode( pods, err := podutil.ListPodsOnANode(
ctx, node.Name,
client, getPodsAssignedToNode,
node, podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
podutil.WithFilter(func(pod *v1.Pod) bool {
return evictable.IsEvictable(pod) && return evictable.IsEvictable(pod) &&
!nodeutil.PodFitsCurrentNode(pod, node) && !nodeutil.PodFitsCurrentNode(pod, node) &&
nodeutil.PodFitsAnyNode(pod, nodes) nodeutil.PodFitsAnyNode(pod, nodes)
}), }),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(strategy.Params.LabelSelector),
) )
if err != nil { if err != nil {
klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node)) klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))


@@ -22,16 +22,18 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1" policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
func TestRemovePodsViolatingNodeAffinity(t *testing.T) { func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
ctx := context.Background()
requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{ requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
Enabled: true, Enabled: true,
Params: &api.StrategyParameters{ Params: &api.StrategyParameters{
@@ -59,10 +61,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil) nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil)
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil) unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil)
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
unschedulableNodeWithLabels.Spec.Unschedulable = true unschedulableNodeWithLabels.Spec.Unschedulable = true
addPodsToNode := func(node *v1.Node) []v1.Pod { addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time) []*v1.Pod {
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil) podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{ podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{ NodeAffinity: &v1.NodeAffinity{
@@ -91,20 +93,25 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
return []v1.Pod{ pod1.DeletionTimestamp = deletionTimestamp
*podWithNodeAffinity, pod2.DeletionTimestamp = deletionTimestamp
*pod1,
*pod2, return []*v1.Pod{
podWithNodeAffinity,
pod1,
pod2,
} }
} }
var uint1 uint = 1
tests := []struct { tests := []struct {
description string description string
nodes []*v1.Node nodes []*v1.Node
pods []v1.Pod pods []*v1.Pod
strategy api.DeschedulerStrategy strategy api.DeschedulerStrategy
expectedEvictedPodCount int expectedEvictedPodCount uint
maxPodsToEvictPerNode int maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
}{ }{
{ {
description: "Invalid strategy type, should not evict any pods", description: "Invalid strategy type, should not evict any pods",
@@ -117,74 +124,116 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
}, },
}, },
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels), pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels}, nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Pod is correctly scheduled on node, no eviction expected", description: "Pod is correctly scheduled on node, no eviction expected",
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy, strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels), pods: addPodsToNode(nodeWithLabels, nil),
nodes: []*v1.Node{nodeWithLabels}, nodes: []*v1.Node{nodeWithLabels},
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted", description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy, strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels), pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels}, nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted", description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy, strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels), pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels}, nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: 1, maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should not be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
}, },
{ {
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict", description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy, strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
pods: addPodsToNode(nodeWithoutLabels), pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels}, nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict", description: "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict",
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy, strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
pods: addPodsToNode(nodeWithoutLabels), pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels}, nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: 1, maxPodsToEvictPerNode: &uint1,
}, },
} }
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fakeClient := &fake.Clientset{} var objs []runtime.Object
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { for _, node := range tc.nodes {
return true, &v1.PodList{Items: tc.pods}, nil objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,
false,
false,
false,
)
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
}) })
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
false,
false,
false,
)
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
} }
} }


@@ -20,6 +20,7 @@ import (
"context" "context"
"fmt" "fmt"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -48,18 +49,18 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
} }
// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil { if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters") klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
return return
} }
var includedNamespaces, excludedNamespaces []string var includedNamespaces, excludedNamespaces sets.String
var labelSelector *metav1.LabelSelector var labelSelector *metav1.LabelSelector
if strategy.Params != nil { if strategy.Params != nil {
if strategy.Params.Namespaces != nil { if strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = strategy.Params.Namespaces.Exclude excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
} }
labelSelector = strategy.Params.LabelSelector labelSelector = strategy.Params.LabelSelector
} }
@@ -77,17 +78,20 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit)) evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(labelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes { for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node)) klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode( pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
ctx,
client,
node,
podutil.WithFilter(evictable.IsEvictable),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(labelSelector),
)
if err != nil { if err != nil {
//no pods evicted as error encountered retrieving evictable Pods //no pods evicted as error encountered retrieving evictable Pods
return return


@@ -9,10 +9,12 @@ import (
policyv1 "k8s.io/api/policy/v1" policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
@@ -45,11 +47,10 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
} }
func TestDeletePodsViolatingNodeTaints(t *testing.T) { func TestDeletePodsViolatingNodeTaints(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil) node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1}) node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil) node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
node1 = addTaintsToNode(node2, "testingTaint", "testing", []int{1}) node2 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) { node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{ node.ObjectMeta.Labels = map[string]string{
@@ -116,144 +117,168 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
"datacenter": "west", "datacenter": "west",
} }
var uint1 uint = 1
tests := []struct { tests := []struct {
description string description string
nodes []*v1.Node nodes []*v1.Node
pods []v1.Pod pods []*v1.Pod
evictLocalStoragePods bool evictLocalStoragePods bool
evictSystemCriticalPods bool evictSystemCriticalPods bool
maxPodsToEvictPerNode int maxPodsToEvictPerNode *uint
expectedEvictedPodCount int maxNoOfPodsToEvictPerNamespace *uint
nodeFit bool expectedEvictedPodCount uint
nodeFit bool
}{ }{
{ {
description: "Pods not tolerating node taint should be evicted", description: "Pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p2, *p3}, pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p2 gets evicted expectedEvictedPodCount: 1, //p2 gets evicted
}, },
{ {
description: "Pods with tolerations but not tolerating node taint should be evicted", description: "Pods with tolerations but not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p3, *p4}, pods: []*v1.Pod{p1, p3, p4},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p4 gets evicted expectedEvictedPodCount: 1, //p4 gets evicted
}, },
{ {
description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted", description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p1, *p5, *p6}, pods: []*v1.Pod{p1, p5, p6},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 1, maxPodsToEvictPerNode: &uint1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted expectedEvictedPodCount: 1, //p5 or p6 gets evicted
}, },
{
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p5, p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxNoOfPodsToEvictPerNamespace: &uint1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{ {
description: "Critical pods not tolerating node taint should not be evicted", description: "Critical pods not tolerating node taint should not be evicted",
pods: []v1.Pod{*p7, *p8, *p9, *p10}, pods: []*v1.Pod{p7, p8, p9, p10},
nodes: []*v1.Node{node2}, nodes: []*v1.Node{node2},
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //nothing is evicted expectedEvictedPodCount: 0, //nothing is evicted
}, },
{ {
description: "Critical pods except storage pods not tolerating node taint should not be evicted", description: "Critical pods except storage pods not tolerating node taint should not be evicted",
pods: []v1.Pod{*p7, *p8, *p9, *p10}, pods: []*v1.Pod{p7, p8, p9, p10},
nodes: []*v1.Node{node2}, nodes: []*v1.Node{node2},
evictLocalStoragePods: true, evictLocalStoragePods: true,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p9 gets evicted expectedEvictedPodCount: 1, //p9 gets evicted
}, },
{ {
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted", description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
pods: []v1.Pod{*p7, *p8, *p10, *p11}, pods: []*v1.Pod{p7, p8, p10, p11},
nodes: []*v1.Node{node2}, nodes: []*v1.Node{node2},
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p11 gets evicted expectedEvictedPodCount: 1, //p11 gets evicted
}, },
{ {
description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical", description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
pods: []v1.Pod{*p2, *p7, *p9, *p10}, pods: []*v1.Pod{p2, p7, p9, p10},
nodes: []*v1.Node{node2}, nodes: []*v1.Node{node1, node2},
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: true, evictSystemCriticalPods: true,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 2, //p2 and p7 are evicted expectedEvictedPodCount: 2, //p2 and p7 are evicted
}, },
{ {
description: "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes", description: "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes",
pods: []v1.Pod{*p1, *p2, *p3}, pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //p2 gets evicted expectedEvictedPodCount: 0, //p2 gets evicted
nodeFit: true, nodeFit: true,
}, },
{ {
description: "Pod p12 doesn't tolerate taint on it's node, but other nodes don't match it's selector", description: "Pod p12 doesn't tolerate taint on it's node, but other nodes don't match it's selector",
pods: []v1.Pod{*p1, *p3, *p12}, pods: []*v1.Pod{p1, p3, p12},
nodes: []*v1.Node{node1, node3}, nodes: []*v1.Node{node1, node3},
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //p2 gets evicted expectedEvictedPodCount: 0, //p2 gets evicted
nodeFit: true, nodeFit: true,
}, },
{ {
description: "Pod p2 doesn't tolerate taint on it's node, but other nodes are unschedulable", description: "Pod p2 doesn't tolerate taint on it's node, but other nodes are unschedulable",
pods: []v1.Pod{*p1, *p2, *p3}, pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node4}, nodes: []*v1.Node{node1, node4},
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //p2 gets evicted expectedEvictedPodCount: 0, //p2 gets evicted
nodeFit: true, nodeFit: true,
}, },
} }
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
// create fake client ctx, cancel := context.WithCancel(context.Background())
fakeClient := &fake.Clientset{} defer cancel()
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
tc.evictLocalStoragePods,
tc.evictSystemCriticalPods,
false,
false,
false,
)
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: tc.nodeFit,
},
}
RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
}) })
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
tc.evictLocalStoragePods,
tc.evictSystemCriticalPods,
false,
)
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: tc.nodeFit,
},
}
RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
} }
} }
func TestToleratesTaint(t *testing.T) { func TestToleratesTaint(t *testing.T) {


@@ -24,15 +24,17 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
) )
// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its strategy. // HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its strategy.
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage. // Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validateNodeUtilizationParams(strategy.Params); err != nil { if err := validateNodeUtilizationParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid HighNodeUtilization parameters") klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
return return
@@ -61,7 +63,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
resourceNames := getResourceNames(targetThresholds) resourceNames := getResourceNames(targetThresholds)
sourceNodes, highNodes := classifyNodes( sourceNodes, highNodes := classifyNodes(
getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames), getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
func(node *v1.Node, usage NodeUsage) bool { func(node *v1.Node, usage NodeUsage) bool {
return isNodeWithLowUtilization(usage) return isNodeWithLowUtilization(usage)
}, },
@@ -92,8 +94,8 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further") klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
return return
} }
if len(sourceNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes { if len(sourceNodes) <= strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes) klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
return return
} }
if len(sourceNodes) == len(nodes) { if len(sourceNodes) == len(nodes) {
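The change from "<" to "<=" above means that having exactly NumberOfNodes underutilized nodes is no longer enough for HighNodeUtilization to act. A tiny standalone sketch of the new boundary (illustrative helpers, not repo code):
// Sketch only: the new "<=" guard skips when underutilized == NumberOfNodes,
// whereas the old "<" guard only skipped when there were strictly fewer.
package main
import "fmt"
func skipsNow(underutilized, numberOfNodes int) bool      { return underutilized <= numberOfNodes }
func skippedBefore(underutilized, numberOfNodes int) bool { return underutilized < numberOfNodes }
func main() {
	fmt.Println(skippedBefore(1, 1), skipsNow(1, 1)) // false true: exactly NumberOfNodes underutilized nodes now skips
	fmt.Println(skippedBefore(2, 1), skipsNow(2, 1)) // false false: strictly more underutilized nodes still proceeds
}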

View File

@@ -19,23 +19,24 @@ package nodeutilization
import (
	"context"
	"fmt"
-	"strings"
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/test"
)

func TestHighNodeUtilization(t *testing.T) {
-	ctx := context.Background()
	n1NodeName := "n1"
	n2NodeName := "n2"
	n3NodeName := "n3"
@@ -44,13 +45,12 @@ func TestHighNodeUtilization(t *testing.T) {
nodeSelectorValue := "west" nodeSelectorValue := "west"
testCases := []struct { testCases := []struct {
name string name string
thresholds api.ResourceThresholds thresholds api.ResourceThresholds
nodes map[string]*v1.Node nodes []*v1.Node
pods map[string]*v1.PodList pods []*v1.Pod
maxPodsToEvictPerNode int expectedPodsEvicted uint
expectedPodsEvicted int evictedPods []string
evictedPods []string
}{ }{
{ {
name: "no node below threshold usage", name: "no node below threshold usage",
@@ -58,39 +58,26 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 20, v1.ResourceCPU: 20,
v1.ResourcePods: 20, v1.ResourcePods: 20,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { // These won't be evicted.
Items: []v1.Pod{ test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted. test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef), // These won't be evicted.
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
n2NodeName: { // These won't be evicted.
Items: []v1.Pod{ test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
// These won't be evicted. test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef), test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 0,
expectedPodsEvicted: 0,
}, },
{ {
name: "no evictable pods", name: "no evictable pods",
@@ -98,57 +85,44 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 40, v1.ResourceCPU: 40,
v1.ResourcePods: 40, v1.ResourcePods: 40,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil), test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { // These won't be evicted.
Items: []v1.Pod{ test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
// These won't be evicted. // A pod with local storage.
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) { test.SetNormalOwnerRef(pod)
// A pod with local storage. pod.Spec.Volumes = []v1.Volume{
test.SetNormalOwnerRef(pod) {
pod.Spec.Volumes = []v1.Volume{ Name: "sample",
{ VolumeSource: v1.VolumeSource{
Name: "sample", HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
VolumeSource: v1.VolumeSource{ EmptyDir: &v1.EmptyDirVolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"}, SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
EmptyDir: &v1.EmptyDirVolumeSource{ },
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)}, },
}, }
}, // A Mirror Pod.
} pod.Annotations = test.GetMirrorPodAnnotation()
// A Mirror Pod. }),
pod.Annotations = test.GetMirrorPodAnnotation() test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
}), // A Critical Pod.
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) { pod.Namespace = "kube-system"
// A Critical Pod. priority := utils.SystemCriticalPriority
pod.Namespace = "kube-system" pod.Spec.Priority = &priority
priority := utils.SystemCriticalPriority }),
pod.Spec.Priority = &priority // These won't be evicted.
}), test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
}, test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetDSOwnerRef),
}, test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
n2NodeName: { test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{ test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
// These won't be evicted. test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetDSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 0,
expectedPodsEvicted: 0,
}, },
{ {
name: "no node to schedule evicted pods", name: "no node to schedule evicted pods",
@@ -156,34 +130,21 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 20, v1.ResourceCPU: 20,
v1.ResourcePods: 20, v1.ResourcePods: 20,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable), test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { // These can't be evicted.
Items: []v1.Pod{ test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These can't be evicted. // These can't be evicted.
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p3", 400, 0, n3NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p4", 400, 0, n3NodeName, test.SetRSOwnerRef),
n2NodeName: { test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{
// These can't be evicted.
*test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p3", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 0,
expectedPodsEvicted: 0,
}, },
{ {
name: "without priorities", name: "without priorities",
@@ -191,42 +152,29 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30, v1.ResourceCPU: 30,
v1.ResourcePods: 30, v1.ResourcePods: 30,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable), test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{ // These won't be evicted.
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
// These won't be evicted. // A Critical Pod.
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) { pod.Namespace = "kube-system"
// A Critical Pod. priority := utils.SystemCriticalPriority
pod.Namespace = "kube-system" pod.Spec.Priority = &priority
priority := utils.SystemCriticalPriority }),
pod.Spec.Priority = &priority // These won't be evicted.
}), test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
n2NodeName: { test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{ test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
// These won't be evicted.
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 2,
expectedPodsEvicted: 2, evictedPods: []string{"p1", "p7"},
evictedPods: []string{"p1", "p7"},
}, },
{ {
name: "without priorities stop when resource capacity is depleted", name: "without priorities stop when resource capacity is depleted",
@@ -234,117 +182,79 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30, v1.ResourceCPU: 30,
v1.ResourcePods: 30, v1.ResourcePods: 30,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 2000, 3000, 10, nil), test.BuildTestNode(n1NodeName, 2000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil), test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable), test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{ // These won't be evicted.
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
n2NodeName: { test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{ test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
// These won't be evicted.
*test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 1,
expectedPodsEvicted: 1,
}, },
{ {
name: "with priorities", name: "with priorities",
thresholds: api.ResourceThresholds{ thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30, v1.ResourceCPU: 30,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil), test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable), test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
Items: []v1.Pod{ test.SetRSOwnerRef(pod)
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) { test.SetPodPriority(pod, lowPriority)
test.SetRSOwnerRef(pod) }),
test.SetPodPriority(pod, lowPriority) test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
}), test.SetRSOwnerRef(pod)
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) { test.SetPodPriority(pod, highPriority)
test.SetRSOwnerRef(pod) }),
test.SetPodPriority(pod, highPriority) // These won't be evicted.
}), test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p7", 400, 0, n2NodeName, test.SetRSOwnerRef),
n2NodeName: { test.BuildTestPod("p8", 400, 0, n2NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{ // These won't be evicted.
// These won't be evicted. test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p7", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p8", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetDSOwnerRef),
},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 1,
expectedPodsEvicted: 1, evictedPods: []string{"p1"},
evictedPods: []string{"p1"},
}, },
{ {
name: "without priorities evicting best-effort pods only", name: "without priorities evicting best-effort pods only",
thresholds: api.ResourceThresholds{ thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30, v1.ResourceCPU: 30,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 3000, 3000, 10, nil), test.BuildTestNode(n1NodeName, 3000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 3000, 3000, 5, nil), test.BuildTestNode(n2NodeName, 3000, 3000, 5, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 3000, 3000, 10, test.SetNodeUnschedulable), test.BuildTestNode(n3NodeName, 3000, 3000, 10, test.SetNodeUnschedulable),
}, },
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value) // All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
Items: []v1.Pod{ test.SetRSOwnerRef(pod)
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) { test.MakeBestEffortPod(pod)
test.SetRSOwnerRef(pod) }),
test.MakeBestEffortPod(pod) test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
}), test.SetRSOwnerRef(pod)
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) { }),
test.SetRSOwnerRef(pod) // These won't be evicted.
}), test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
n2NodeName: { test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 1,
expectedPodsEvicted: 1, evictedPods: []string{"p1"},
evictedPods: []string{"p1"},
}, },
{ {
name: "with extended resource", name: "with extended resource",
@@ -352,60 +262,44 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 20, v1.ResourceCPU: 20,
extendedResource: 40, extendedResource: 40,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) { test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8) test.SetNodeExtendedResource(node, extendedResource, 8)
}), }),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) { test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8) test.SetNodeExtendedResource(node, extendedResource, 8)
}), }),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable), test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
Items: []v1.Pod{ test.SetRSOwnerRef(pod)
*test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) { test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
test.SetRSOwnerRef(pod) }),
test.SetPodExtendedResourceRequest(pod, extendedResource, 1) test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
}), test.SetRSOwnerRef(pod)
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) { test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
test.SetRSOwnerRef(pod) }),
test.SetPodExtendedResourceRequest(pod, extendedResource, 1) // These won't be evicted
}), test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
// These won't be evicted test.SetRSOwnerRef(pod)
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) { test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
test.SetDSOwnerRef(pod) }),
test.SetPodExtendedResourceRequest(pod, extendedResource, 1) test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
}), test.SetRSOwnerRef(pod)
}, test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}, }),
n2NodeName: { test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
Items: []v1.Pod{ test.SetRSOwnerRef(pod)
*test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) { test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
test.SetRSOwnerRef(pod) }),
test.SetPodExtendedResourceRequest(pod, extendedResource, 1) test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
}), test.SetRSOwnerRef(pod)
*test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) { test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
test.SetRSOwnerRef(pod) }),
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
},
},
n3NodeName: {
Items: []v1.Pod{},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 2,
expectedPodsEvicted: 2, evictedPods: []string{"p1", "p2"},
evictedPods: []string{"p1", "p2"},
}, },
{ {
name: "with extended resource in some of nodes", name: "with extended resource in some of nodes",
@@ -413,41 +307,29 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 40, v1.ResourceCPU: 40,
extendedResource: 40, extendedResource: 40,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) { test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8) test.SetNodeExtendedResource(node, extendedResource, 8)
}), }),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable), test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { //These won't be evicted
Items: []v1.Pod{ test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
//These won't be evicted test.SetRSOwnerRef(pod)
*test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) { test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
test.SetRSOwnerRef(pod) }),
test.SetPodExtendedResourceRequest(pod, extendedResource, 1) test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
}), test.SetRSOwnerRef(pod)
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) { test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
test.SetRSOwnerRef(pod) }),
test.SetPodExtendedResourceRequest(pod, extendedResource, 1) test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
}), test.BuildTestPod("p4", 500, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p5", 500, 0, n2NodeName, test.SetRSOwnerRef),
}, test.BuildTestPod("p6", 500, 0, n2NodeName, test.SetRSOwnerRef),
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 500, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 500, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 500, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 0,
expectedPodsEvicted: 0,
}, },
{ {
name: "Other node match pod node selector", name: "Other node match pod node selector",
@@ -455,37 +337,28 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30, v1.ResourceCPU: 30,
v1.ResourcePods: 30, v1.ResourcePods: 30,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) { test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{ node.ObjectMeta.Labels = map[string]string{
nodeSelectorKey: nodeSelectorValue, nodeSelectorKey: nodeSelectorValue,
} }
}), }),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{ test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef), // A pod selecting nodes in the "west" datacenter
}, test.SetRSOwnerRef(pod)
}, pod.Spec.NodeSelector = map[string]string{
n2NodeName: { nodeSelectorKey: nodeSelectorValue,
Items: []v1.Pod{ }
*test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) { }),
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 1,
expectedPodsEvicted: 1,
}, },
{ {
name: "Other node does not match pod node selector", name: "Other node does not match pod node selector",
@@ -493,67 +366,56 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30, v1.ResourceCPU: 30,
v1.ResourcePods: 30, v1.ResourcePods: 30,
}, },
nodes: map[string]*v1.Node{ nodes: []*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil), test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil), test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
}, },
pods: map[string]*v1.PodList{ pods: []*v1.Pod{
n1NodeName: { test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
Items: []v1.Pod{ test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef), test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef), // A pod selecting nodes in the "west" datacenter
}, test.SetRSOwnerRef(pod)
}, pod.Spec.NodeSelector = map[string]string{
n2NodeName: { nodeSelectorKey: nodeSelectorValue,
Items: []v1.Pod{ }
*test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) { }),
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
},
},
}, },
maxPodsToEvictPerNode: 0, expectedPodsEvicted: 0,
expectedPodsEvicted: 0,
}, },
	}

-	for _, test := range testCases {
-		t.Run(test.name, func(t *testing.T) {
-			fakeClient := &fake.Clientset{}
-			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-				list := action.(core.ListAction)
-				fieldString := list.GetListRestrictions().Fields.String()
-				if strings.Contains(fieldString, n1NodeName) {
-					return true, test.pods[n1NodeName], nil
-				}
-				if strings.Contains(fieldString, n2NodeName) {
-					return true, test.pods[n2NodeName], nil
-				}
-				if strings.Contains(fieldString, n3NodeName) {
-					return true, test.pods[n3NodeName], nil
-				}
-				return true, nil, fmt.Errorf("Failed to list: %v", list)
-			})
-			fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
-				getAction := action.(core.GetAction)
-				if node, exists := test.nodes[getAction.GetName()]; exists {
-					return true, node, nil
-				}
-				return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
-			})
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			for _, node := range testCase.nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range testCase.pods {
+				objs = append(objs, pod)
+			}
+			fakeClient := fake.NewSimpleClientset(objs...)
+
+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}

			podsForEviction := make(map[string]struct{})
-			for _, pod := range test.evictedPods {
+			for _, pod := range testCase.evictedPods {
				podsForEviction[pod] = struct{}{}
			}

			evictionFailed := false
-			if len(test.evictedPods) > 0 {
+			if len(testCase.evictedPods) > 0 {
				fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
					getAction := action.(core.CreateAction)
					obj := getAction.GetObject()
@@ -568,17 +430,41 @@ func TestHighNodeUtilization(t *testing.T) {
				})
			}

-			var nodes []*v1.Node
-			for _, node := range test.nodes {
-				nodes = append(nodes, node)
-			}
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
+			//fakeClient := &fake.Clientset{}
//fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
// list := action.(core.ListAction)
// fieldString := list.GetListRestrictions().Fields.String()
// if strings.Contains(fieldString, n1NodeName) {
// return true, test.pods[n1NodeName], nil
// }
// if strings.Contains(fieldString, n2NodeName) {
// return true, test.pods[n2NodeName], nil
// }
// if strings.Contains(fieldString, n3NodeName) {
// return true, test.pods[n3NodeName], nil
// }
// return true, nil, fmt.Errorf("Failed to list: %v", list)
//})
//fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
// getAction := action.(core.GetAction)
// if node, exists := testCase.nodes[getAction.GetName()]; exists {
// return true, node, nil
// }
// return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
//})
			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				"v1",
				false,
-				test.maxPodsToEvictPerNode,
-				nodes,
+				nil,
+				nil,
+				testCase.nodes,
+				false,
+				false,
				false,
				false,
				false,
@@ -588,16 +474,16 @@ func TestHighNodeUtilization(t *testing.T) {
				Enabled: true,
				Params: &api.StrategyParameters{
					NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
-						Thresholds: test.thresholds,
+						Thresholds: testCase.thresholds,
					},
					NodeFit: true,
				},
			}

-			HighNodeUtilization(ctx, fakeClient, strategy, nodes, podEvictor)
+			HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)

			podsEvicted := podEvictor.TotalEvicted()
-			if test.expectedPodsEvicted != podsEvicted {
-				t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
+			if testCase.expectedPodsEvicted != podsEvicted {
+				t.Errorf("Expected %v pods to be evicted but %v got evicted", testCase.expectedPodsEvicted, podsEvicted)
			}
			if evictionFailed {
				t.Errorf("Pod evictions failed unexpectedly")
@@ -674,7 +560,6 @@ func TestValidateHighNodeUtilizationStrategyConfig(t *testing.T) {
}

func TestHighNodeUtilizationWithTaints(t *testing.T) {
-	ctx := context.Background()
	strategy := api.DeschedulerStrategy{
		Enabled: true,
		Params: &api.StrategyParameters{
@@ -710,7 +595,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
		name              string
		nodes             []*v1.Node
		pods              []*v1.Pod
-		evictionsExpected int
+		evictionsExpected uint
	}{
		{
			name: "No taints",
@@ -752,6 +637,9 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
	for _, item := range tests {
		t.Run(item.name, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
			var objs []runtime.Object
			for _, node := range item.nodes {
				objs = append(objs, node)
@@ -762,19 +650,32 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
			}

			fakeClient := fake.NewSimpleClientset(objs...)

+			sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+			podInformer := sharedInformerFactory.Core().V1().Pods()
+
+			getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
+			if err != nil {
+				t.Errorf("Build get pods assigned to node function error: %v", err)
+			}
+
+			sharedInformerFactory.Start(ctx.Done())
+			sharedInformerFactory.WaitForCacheSync(ctx.Done())
+
			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				"policy/v1",
				false,
-				item.evictionsExpected,
+				&item.evictionsExpected,
+				nil,
				item.nodes,
				false,
				false,
				false,
+				false,
+				false,
			)

-			HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor)
+			HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)

			if item.evictionsExpected != podEvictor.TotalEvicted() {
				t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())

View File

@@ -19,6 +19,7 @@ package nodeutilization
import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	clientset "k8s.io/client-go/kubernetes"
@@ -27,12 +28,13 @@ import (
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
+	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/utils"
)

// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
// to calculate nodes' utilization and not the actual resource usage.
-func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
	// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
		klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
@@ -71,7 +73,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
	resourceNames := getResourceNames(thresholds)

	lowNodes, sourceNodes := classifyNodes(
-		getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames),
+		getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
		// The node has to be schedulable (to be able to move workload there)
		func(node *v1.Node, usage NodeUsage) bool {
			if nodeutil.IsNodeUnschedulable(node) {
@@ -118,8 +120,8 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
		return
	}
-	if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
-		klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
+	if len(lowNodes) <= strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
+		klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
		return
	}
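The `<` to `<=` change means the strategy now also bails out when the number of underutilized nodes exactly equals `NumberOfNodes`. A small Go sketch of the parameters involved, built with the same API types the tests in this diff construct; the threshold values are arbitrary examples, and any field name beyond `Thresholds` and `NumberOfNodes` is an assumption about the descheduler API rather than something shown in this diff:

package example

import (
	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/api"
)

// Example LowNodeUtilization strategy: with NumberOfNodes set to 3, the
// strategy only acts when strictly more than 3 nodes fall below the low
// thresholds. After this change, exactly 3 underutilized nodes is no longer
// enough to trigger evictions.
var lowNodeUtilization = api.DeschedulerStrategy{
	Enabled: true,
	Params: &api.StrategyParameters{
		NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
			Thresholds: api.ResourceThresholds{
				v1.ResourceCPU:  20,
				v1.ResourcePods: 20,
			},
			TargetThresholds: api.ResourceThresholds{
				v1.ResourceCPU:  50,
				v1.ResourcePods: 50,
			},
			NumberOfNodes: 3,
		},
	},
}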

View File

@@ -19,15 +19,16 @@ package nodeutilization
import (
	"context"
	"fmt"
+	"sort"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
-	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/utils"
-	"sort"
)

// NodeUsage stores a node's info, pods on it, thresholds and its resource usage
@@ -77,16 +78,15 @@ func validateThresholds(thresholds api.ResourceThresholds) error {
}

func getNodeUsage(
-	ctx context.Context,
-	client clientset.Interface,
	nodes []*v1.Node,
	lowThreshold, highThreshold api.ResourceThresholds,
	resourceNames []v1.ResourceName,
+	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) []NodeUsage {
	var nodeUsageList []NodeUsage

	for _, node := range nodes {
-		pods, err := podutil.ListPodsOnANode(ctx, client, node)
+		pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
		if err != nil {
			klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
			continue
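With this change the per-node pod listing is served from the informer-backed index rather than a field-selector List against the API server. A short sketch of the new call shape; only `ListPodsOnANode` and `GetPodsAssignedToNodeFunc` come from the diff above, the helper name and the filter are illustrative assumptions:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// listUserPodsOnNode lists the pods on one node through the informer index and
// drops kube-system pods. The filter is a hypothetical example; passing nil,
// as getNodeUsage does above, lists every pod on the node.
func listUserPodsOnNode(nodeName string, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) []*v1.Pod {
	pods, err := podutil.ListPodsOnANode(nodeName, getPodsAssignedToNode, func(pod *v1.Pod) bool {
		return pod.Namespace != "kube-system"
	})
	if err != nil {
		klog.V(2).InfoS("Listing pods on node failed", "node", nodeName, "err", err)
		return nil
	}
	return pods
}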

View File

@@ -20,6 +20,7 @@ import (
"context" "context"
"fmt" "fmt"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -48,18 +49,18 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
} }
// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules. // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil { if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters") klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
return return
} }
var includedNamespaces, excludedNamespaces []string var includedNamespaces, excludedNamespaces sets.String
var labelSelector *metav1.LabelSelector var labelSelector *metav1.LabelSelector
if strategy.Params != nil { if strategy.Params != nil {
if strategy.Params.Namespaces != nil { if strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = strategy.Params.Namespaces.Exclude excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
} }
labelSelector = strategy.Params.LabelSelector labelSelector = strategy.Params.LabelSelector
} }
@@ -77,16 +78,19 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit)) evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
podFilter, err := podutil.NewOptions().
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(labelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes { for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node)) klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode( pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
ctx,
client,
node,
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(labelSelector),
)
if err != nil { if err != nil {
return return
} }
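The strategies now build one pod filter up front instead of passing list options on every call. A sketch of the builder usage with only the option methods that appear in this diff; the namespace and label values are placeholders, and the assumption that BuildFilterFunc returns a podutil.FilterFunc is inferred from the call sites above:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/klog/v2"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// buildExampleFilter composes the same options RemovePodsViolatingInterPodAntiAffinity
// now precomputes: namespace include/exclude sets plus a label selector.
func buildExampleFilter() podutil.FilterFunc {
	includedNamespaces := sets.NewString("team-a", "team-b") // placeholder namespaces
	excludedNamespaces := sets.NewString("kube-system")      // placeholder namespaces
	labelSelector := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}

	podFilter, err := podutil.NewOptions().
		WithNamespaces(includedNamespaces).
		WithoutNamespaces(excludedNamespaces).
		WithLabelSelector(labelSelector).
		BuildFilterFunc()
	if err != nil {
		klog.ErrorS(err, "Error initializing pod filter function")
		return nil
	}
	return podFilter
}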

View File

@@ -24,16 +24,17 @@ import (
policyv1 "k8s.io/api/policy/v1" policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
func TestPodAntiAffinity(t *testing.T) { func TestPodAntiAffinity(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil) node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) { node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{ node.ObjectMeta.Labels = map[string]string{
@@ -55,6 +56,10 @@ func TestPodAntiAffinity(t *testing.T) {
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil) p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil) p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil) p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
p9.DeletionTimestamp = &metav1.Time{}
p10.DeletionTimestamp = &metav1.Time{}
criticalPriority := utils.SystemCriticalPriority criticalPriority := utils.SystemCriticalPriority
nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node1.Name, func(pod *v1.Pod) { nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node1.Name, func(pod *v1.Pod) {
@@ -72,6 +77,8 @@ func TestPodAntiAffinity(t *testing.T) {
test.SetNormalOwnerRef(p5) test.SetNormalOwnerRef(p5)
test.SetNormalOwnerRef(p6) test.SetNormalOwnerRef(p6)
test.SetNormalOwnerRef(p7) test.SetNormalOwnerRef(p7)
test.SetNormalOwnerRef(p9)
test.SetNormalOwnerRef(p10)
// set pod anti affinity // set pod anti affinity
setPodAntiAffinity(p1, "foo", "bar") setPodAntiAffinity(p1, "foo", "bar")
@@ -80,6 +87,8 @@ func TestPodAntiAffinity(t *testing.T) {
setPodAntiAffinity(p5, "foo1", "bar1") setPodAntiAffinity(p5, "foo1", "bar1")
setPodAntiAffinity(p6, "foo1", "bar1") setPodAntiAffinity(p6, "foo1", "bar1")
setPodAntiAffinity(p7, "foo", "bar") setPodAntiAffinity(p7, "foo", "bar")
setPodAntiAffinity(p9, "foo", "bar")
setPodAntiAffinity(p10, "foo", "bar")
// set pod priority // set pod priority
test.SetPodPriority(p5, 100) test.SetPodPriority(p5, 100)
@@ -91,98 +100,133 @@ func TestPodAntiAffinity(t *testing.T) {
"datacenter": "west", "datacenter": "west",
} }
+	var uint1 uint = 1
+	var uint3 uint = 3
	tests := []struct {
		description                    string
-		maxPodsToEvictPerNode          int
-		pods                           []v1.Pod
-		expectedEvictedPodCount        int
-		nodeFit                        bool
-		nodes                          []*v1.Node
+		maxPodsToEvictPerNode          *uint
+		maxNoOfPodsToEvictPerNamespace *uint
+		pods                           []*v1.Pod
+		expectedEvictedPodCount        uint
+		nodeFit                        bool
+		nodes                          []*v1.Node
	}{
{ {
description: "Maximum pods to evict - 0", description: "Maximum pods to evict - 0",
maxPodsToEvictPerNode: 0, pods: []*v1.Pod{p1, p2, p3, p4},
pods: []v1.Pod{*p1, *p2, *p3, *p4},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3, expectedEvictedPodCount: 3,
}, },
{ {
description: "Maximum pods to evict - 3", description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: 3, maxPodsToEvictPerNode: &uint3,
pods: []v1.Pod{*p1, *p2, *p3, *p4}, pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3, expectedEvictedPodCount: 3,
}, },
{
description: "Maximum pods to evict (maxPodsToEvictPerNamespace=3) - 3",
maxNoOfPodsToEvictPerNamespace: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{ {
description: "Evict only 1 pod after sorting", description: "Evict only 1 pod after sorting",
maxPodsToEvictPerNode: 0, pods: []*v1.Pod{p5, p6, p7},
pods: []v1.Pod{*p5, *p6, *p7},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
}, },
{ {
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)", description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: 1, maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p1, *nonEvictablePod}, pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
}, },
{ {
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)", description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: 1, maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p1, *nonEvictablePod}, pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
}, },
{ {
description: "Won't evict pods because node selectors don't match available nodes", description: "Won't evict pods because node selectors don't match available nodes",
maxPodsToEvictPerNode: 1, maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p8, *nonEvictablePod}, pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
nodeFit: true, nodeFit: true,
}, },
{ {
description: "Won't evict pods because only other node is not schedulable", description: "Won't evict pods because only other node is not schedulable",
maxPodsToEvictPerNode: 1, maxPodsToEvictPerNode: &uint1,
pods: []v1.Pod{*p8, *nonEvictablePod}, pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node3}, nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
nodeFit: true, nodeFit: true,
}, },
{
description: "No pod to evicted since all pod terminating",
pods: []*v1.Pod{p9, p10},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
} }
	for _, test := range tests {
-		// create fake client
-		fakeClient := &fake.Clientset{}
-		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-			return true, &v1.PodList{Items: test.pods}, nil
-		})
-		fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
-			return true, node1, nil
-		})
-		podEvictor := evictions.NewPodEvictor(
-			fakeClient,
-			policyv1.SchemeGroupVersion.String(),
-			false,
-			test.maxPodsToEvictPerNode,
-			test.nodes,
-			false,
-			false,
-			false,
-		)
-		strategy := api.DeschedulerStrategy{
-			Params: &api.StrategyParameters{
-				NodeFit: test.nodeFit,
-			},
-		}
-		RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor)
-		podsEvicted := podEvictor.TotalEvicted()
-		if podsEvicted != test.expectedEvictedPodCount {
-			t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
-		}
+		t.Run(test.description, func(t *testing.T) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var objs []runtime.Object
+			for _, node := range test.nodes {
+				objs = append(objs, node)
+			}
+			for _, pod := range test.pods {
+				objs = append(objs, pod)
+			}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
test.maxPodsToEvictPerNode,
test.maxNoOfPodsToEvictPerNamespace,
test.nodes,
false,
false,
false,
false,
false,
)
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: test.nodeFit,
},
}
RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != test.expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
}
})
} }
} }

View File

@@ -22,6 +22,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2" "k8s.io/klog/v2"
@@ -56,7 +57,7 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
} }
// PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago. // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validatePodLifeTimeParams(strategy.Params); err != nil { if err := validatePodLifeTimeParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid PodLifeTime parameters") klog.ErrorS(err, "Invalid PodLifeTime parameters")
return return
@@ -68,10 +69,10 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
return return
} }
var includedNamespaces, excludedNamespaces []string var includedNamespaces, excludedNamespaces sets.String
if strategy.Params.Namespaces != nil { if strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = strategy.Params.Namespaces.Exclude excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
} }
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority)) evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
@@ -88,10 +89,21 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
} }
} }
podFilter, err := podutil.NewOptions().
WithFilter(filter).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(strategy.Params.LabelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes { for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node)) klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, strategy.Params.LabelSelector, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter) pods := listOldPodsOnNode(node.Name, getPodsAssignedToNode, podFilter, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
for _, pod := range pods { for _, pod := range pods {
success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime") success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
if success { if success {
@@ -108,23 +120,12 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
} }
func listOldPodsOnNode( func listOldPodsOnNode(
ctx context.Context, nodeName string,
client clientset.Interface, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
node *v1.Node, filter podutil.FilterFunc,
includedNamespaces, excludedNamespaces []string,
labelSelector *metav1.LabelSelector,
maxPodLifeTimeSeconds uint, maxPodLifeTimeSeconds uint,
filter func(pod *v1.Pod) bool,
) []*v1.Pod { ) []*v1.Pod {
pods, err := podutil.ListPodsOnANode( pods, err := podutil.ListPodsOnANode(nodeName, getPodsAssignedToNode, filter)
ctx,
client,
node,
podutil.WithFilter(filter),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(labelSelector),
)
if err != nil { if err != nil {
return nil return nil
} }
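PodLifeTime combines its own pod filter with the namespace and label-selector options through `WithFilter`. A hedged sketch of that composition; the terminating-pod check is an illustrative assumption in line with the new test cases that expect no evictions for pods carrying a DeletionTimestamp, and the builder method names are the ones visible in the diff above:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// lifetimeStyleFilter mirrors the shape of the strategy-specific filter that
// PodLifeTime now feeds into WithFilter: a plain func(*v1.Pod) bool combined
// with the namespace options at build time.
func lifetimeStyleFilter(pod *v1.Pod) bool {
	// Skip pods that are already terminating (illustrative assumption).
	return pod.DeletionTimestamp == nil
}

func buildLifetimeFilter(included, excluded sets.String) (podutil.FilterFunc, error) {
	return podutil.NewOptions().
		WithFilter(lifetimeStyleFilter).
		WithNamespaces(included).
		WithoutNamespaces(excluded).
		BuildFilterFunc()
}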

View File

@@ -25,15 +25,16 @@ import (
policyv1 "k8s.io/api/policy/v1" policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
func TestPodLifeTime(t *testing.T) { func TestPodLifeTime(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil) node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)) olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
newerPodCreationTime := metav1.NewTime(time.Now()) newerPodCreationTime := metav1.NewTime(time.Now())
@@ -125,14 +126,24 @@ func TestPodLifeTime(t *testing.T) {
p12.ObjectMeta.OwnerReferences = ownerRef1 p12.ObjectMeta.OwnerReferences = ownerRef1
p13.ObjectMeta.OwnerReferences = ownerRef1 p13.ObjectMeta.OwnerReferences = ownerRef1
p14 := test.BuildTestPod("p14", 100, 0, node1.Name, nil)
p15 := test.BuildTestPod("p15", 100, 0, node1.Name, nil)
p14.Namespace = "dev"
p15.Namespace = "dev"
p14.ObjectMeta.CreationTimestamp = olderPodCreationTime
p15.ObjectMeta.CreationTimestamp = olderPodCreationTime
p14.ObjectMeta.OwnerReferences = ownerRef1
p15.ObjectMeta.OwnerReferences = ownerRef1
p14.DeletionTimestamp = &metav1.Time{}
p15.DeletionTimestamp = &metav1.Time{}
var maxLifeTime uint = 600 var maxLifeTime uint = 600
	testCases := []struct {
		description             string
		strategy                api.DeschedulerStrategy
-		maxPodsToEvictPerNode   int
-		pods                    []v1.Pod
+		pods                    []*v1.Pod
		nodes                   []*v1.Node
-		expectedEvictedPodCount int
+		expectedEvictedPodCount uint
		ignorePvcPods           bool
	}{
{ {
@@ -143,8 +154,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
}, },
}, },
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p1, p2},
pods: []v1.Pod{*p1, *p2},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
}, },
@@ -156,8 +166,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
}, },
}, },
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p3, p4},
pods: []v1.Pod{*p3, *p4},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
}, },
@@ -169,8 +178,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
}, },
}, },
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p5, p6},
pods: []v1.Pod{*p5, *p6},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
}, },
@@ -182,8 +190,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
}, },
}, },
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p7, p8},
pods: []v1.Pod{*p7, *p8},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
}, },
@@ -198,8 +205,7 @@ func TestPodLifeTime(t *testing.T) {
}, },
}, },
}, },
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p9, p10},
pods: []v1.Pod{*p9, *p10},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
}, },
@@ -211,8 +217,7 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
}, },
}, },
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p11},
pods: []v1.Pod{*p11},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
ignorePvcPods: true, ignorePvcPods: true,
@@ -225,13 +230,12 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime}, PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
}, },
}, },
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p11},
pods: []v1.Pod{*p11},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
}, },
{ {
description: "Two old pods with different labels, 1 selected by labelSelector", description: "No pod to evicted since all pod terminating",
strategy: api.DeschedulerStrategy{ strategy: api.DeschedulerStrategy{
Enabled: true, Enabled: true,
Params: &api.StrategyParameters{ Params: &api.StrategyParameters{
@@ -241,36 +245,71 @@ func TestPodLifeTime(t *testing.T) {
}, },
}, },
}, },
maxPodsToEvictPerNode: 5, pods: []*v1.Pod{p12, p13},
pods: []v1.Pod{*p12, *p13},
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
}, },
{
description: "No pod should be evicted since pod terminating",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
},
},
pods: []*v1.Pod{p14, p15},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
} }
for _, tc := range testCases { for _, tc := range testCases {
fakeClient := &fake.Clientset{} t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { var objs []runtime.Object
return true, &v1.PodList{Items: tc.pods}, nil for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
tc.nodes,
false,
false,
tc.ignorePvcPods,
false,
false,
)
PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
}
}) })
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
false,
false,
tc.ignorePvcPods,
)
PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
}
} }
} }
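The rewritten test no longer stubs a list reactor on an empty fake.Clientset; each case seeds a fake clientset with its nodes and pods and serves them through a started informer. A condensed sketch of that scaffolding as a reusable helper (the helper name is hypothetical; the calls mirror the test above):

package strategies

import (
	"context"
	"testing"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// newFakeClientAndNodeIndex seeds a fake clientset with the given objects and
// returns it together with the informer-backed pod lookup used by strategies.
func newFakeClientAndNodeIndex(ctx context.Context, t *testing.T, objs ...runtime.Object) (*fake.Clientset, podutil.GetPodsAssignedToNodeFunc) {
	fakeClient := fake.NewSimpleClientset(objs...)
	sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
	podInformer := sharedInformerFactory.Core().V1().Pods()

	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
	if err != nil {
		t.Fatalf("build get pods assigned to node function error: %v", err)
	}
	// The informer must be started and synced before strategies read from it.
	sharedInformerFactory.Start(ctx.Done())
	sharedInformerFactory.WaitForCacheSync(ctx.Done())
	return fakeClient, getPodsAssignedToNode
}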


@@ -21,6 +21,7 @@ import (
"fmt" "fmt"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2" "k8s.io/klog/v2"
@@ -49,7 +50,7 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameter
// RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node. // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings. // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages. // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil { if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters") klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
return return
@@ -61,10 +62,10 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
return return
} }
var includedNamespaces, excludedNamespaces []string var includedNamespaces, excludedNamespaces sets.String
if strategy.Params.Namespaces != nil { if strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = strategy.Params.Namespaces.Exclude excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
} }
nodeFit := false nodeFit := false
@@ -74,17 +75,20 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit)) evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(strategy.Params.LabelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes { for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node)) klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode( pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
ctx,
client,
node,
podutil.WithFilter(evictable.IsEvictable),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(strategy.Params.LabelSelector),
)
if err != nil { if err != nil {
klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node)) klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
continue continue
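Instead of passing namespace and label-selector options to every ListPodsOnANode call, the strategy now builds a single FilterFunc up front with the podutil options builder and reuses it for each node. A small sketch of that builder, with illustrative filter, namespace, and selector values:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func main() {
	podFilter, err := podutil.NewOptions().
		WithFilter(func(pod *v1.Pod) bool { return pod.Status.Phase == v1.PodRunning }).
		WithNamespaces(sets.NewString("team-a")).
		WithLabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}).
		BuildFilterFunc()
	if err != nil {
		fmt.Println("error building filter:", err)
		return
	}
	// Reuse the same filter for every node, e.g.
	// podutil.ListPodsOnANode(nodeName, getPodsAssignedToNode, podFilter)
	_ = podFilter
}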


@@ -18,23 +18,24 @@ package strategies
import ( import (
"context" "context"
"testing"
"fmt" "fmt"
"testing"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1" policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
func initPods(node *v1.Node) []v1.Pod { func initPods(node *v1.Node) []*v1.Pod {
pods := make([]v1.Pod, 0) pods := make([]*v1.Pod, 0)
for i := int32(0); i <= 9; i++ { for i := int32(0); i <= 9; i++ {
pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil) pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil)
@@ -56,7 +57,7 @@ func initPods(node *v1.Node) []v1.Pod {
}, },
}, },
} }
pods = append(pods, *pod) pods = append(pods, pod)
} }
// The following 3 pods won't get evicted. // The following 3 pods won't get evicted.
@@ -81,8 +82,6 @@ func initPods(node *v1.Node) []v1.Pod {
} }
func TestRemovePodsHavingTooManyRestarts(t *testing.T) { func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("node1", 2000, 3000, 10, nil) node1 := test.BuildTestNode("node1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("node2", 2000, 3000, 10, func(node *v1.Node) { node2 := test.BuildTestNode("node2", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{ node.Spec.Taints = []v1.Taint{
@@ -114,115 +113,139 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
} }
} }
var uint3 uint = 3
tests := []struct { tests := []struct {
description string description string
nodes []*v1.Node nodes []*v1.Node
strategy api.DeschedulerStrategy strategy api.DeschedulerStrategy
expectedEvictedPodCount int expectedEvictedPodCount uint
maxPodsToEvictPerNode int maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
}{ }{
{ {
description: "All pods have total restarts under threshold, no pod evictions", description: "All pods have total restarts under threshold, no pod evictions",
strategy: createStrategy(true, true, 10000, false), strategy: createStrategy(true, true, 10000, false),
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Some pods have total restarts bigger than threshold", description: "Some pods have total restarts bigger than threshold",
strategy: createStrategy(true, true, 1, false), strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6, expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions", description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
strategy: createStrategy(true, true, 1*25, false), strategy: createStrategy(true, true, 1*25, false),
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6, expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions", description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
strategy: createStrategy(true, false, 1*25, false), strategy: createStrategy(true, false, 1*25, false),
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 5, expectedEvictedPodCount: 5,
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions", description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
strategy: createStrategy(true, true, 1*20, false), strategy: createStrategy(true, true, 1*20, false),
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6, expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions", description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
strategy: createStrategy(true, false, 1*20, false), strategy: createStrategy(true, false, 1*20, false),
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6, expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction", description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
strategy: createStrategy(true, true, 5*25+1, false), strategy: createStrategy(true, true, 5*25+1, false),
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction", description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
strategy: createStrategy(true, false, 5*20+1, false), strategy: createStrategy(true, false, 5*20+1, false),
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1, expectedEvictedPodCount: 1,
maxPodsToEvictPerNode: 0,
}, },
{ {
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions", description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
strategy: createStrategy(true, true, 1, false), strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3, expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: 3, maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxNoOfPodsToEvictPerNamespace: &uint3,
}, },
{ {
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tained, 0 pod evictions", description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tained, 0 pod evictions",
strategy: createStrategy(true, true, 1, true), strategy: createStrategy(true, true, 1, true),
nodes: []*v1.Node{node1, node2}, nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: 3, maxPodsToEvictPerNode: &uint3,
}, },
{ {
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions", description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
strategy: createStrategy(true, true, 1, true), strategy: createStrategy(true, true, 1, true),
nodes: []*v1.Node{node1, node3}, nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: 3, maxPodsToEvictPerNode: &uint3,
}, },
} }
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
fakeClient := &fake.Clientset{} ctx, cancel := context.WithCancel(context.Background())
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { defer cancel()
return true, &v1.PodList{Items: pods}, nil
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,
false,
false,
false,
)
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
}) })
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
false,
false,
false,
)
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
} }
} }
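The eviction limits in these tests change from plain ints to *uint: cases that previously passed 0 now pass nil, and the capped cases pass a pointer such as &uint3, so a nil pointer appears to take over the old 0-means-unlimited convention while also allowing a separate per-namespace cap. A trivial sketch of building such limits (variable names are illustrative):

package main

import "fmt"

func main() {
	var maxPodsPerNode *uint // nil: no per-node cap (previously expressed as 0)
	perNamespaceCap := uint(3)
	maxPodsPerNamespace := &perNamespaceCap // cap evictions per namespace at 3

	fmt.Println("per-node cap set:", maxPodsPerNode != nil)
	fmt.Println("per-namespace cap:", *maxPodsPerNamespace)
}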


@@ -22,17 +22,18 @@ import (
"math" "math"
"sort" "sort"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets" utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
) )
@@ -47,70 +48,24 @@ type topology struct {
pods []*v1.Pod pods []*v1.Pod
} }
// topologySpreadStrategyParams contains validated strategy parameters
type topologySpreadStrategyParams struct {
thresholdPriority int32
includedNamespaces sets.String
excludedNamespaces sets.String
labelSelector labels.Selector
nodeFit bool
}
// validateAndParseTopologySpreadParams will validate parameters to ensure that they do not contain invalid values.
func validateAndParseTopologySpreadParams(ctx context.Context, client clientset.Interface, params *api.StrategyParameters) (*topologySpreadStrategyParams, error) {
var includedNamespaces, excludedNamespaces sets.String
if params == nil {
return &topologySpreadStrategyParams{includedNamespaces: includedNamespaces, excludedNamespaces: excludedNamespaces}, nil
}
// At most one of include/exclude can be set
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return nil, fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
if err != nil {
return nil, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
}
if params.Namespaces != nil {
includedNamespaces = sets.NewString(params.Namespaces.Include...)
excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
}
var selector labels.Selector
if params.LabelSelector != nil {
selector, err = metav1.LabelSelectorAsSelector(params.LabelSelector)
if err != nil {
return nil, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
}
}
return &topologySpreadStrategyParams{
thresholdPriority: thresholdPriority,
includedNamespaces: includedNamespaces,
excludedNamespaces: excludedNamespaces,
labelSelector: selector,
nodeFit: params.NodeFit,
}, nil
}
func RemovePodsViolatingTopologySpreadConstraint( func RemovePodsViolatingTopologySpreadConstraint(
ctx context.Context, ctx context.Context,
client clientset.Interface, client clientset.Interface,
strategy api.DeschedulerStrategy, strategy api.DeschedulerStrategy,
nodes []*v1.Node, nodes []*v1.Node,
podEvictor *evictions.PodEvictor, podEvictor *evictions.PodEvictor,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) { ) {
strategyParams, err := validateAndParseTopologySpreadParams(ctx, client, strategy.Params) strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
if err != nil { if err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingTopologySpreadConstraint parameters") klog.ErrorS(err, "Invalid RemovePodsViolatingTopologySpreadConstraint parameters")
return return
} }
evictable := podEvictor.Evictable( evictable := podEvictor.Evictable(
evictions.WithPriorityThreshold(strategyParams.thresholdPriority), evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
evictions.WithNodeFit(strategyParams.nodeFit), evictions.WithNodeFit(strategyParams.NodeFit),
evictions.WithLabelSelector(strategyParams.labelSelector), evictions.WithLabelSelector(strategyParams.LabelSelector),
) )
nodeMap := make(map[string]*v1.Node, len(nodes)) nodeMap := make(map[string]*v1.Node, len(nodes))
@@ -140,8 +95,8 @@ func RemovePodsViolatingTopologySpreadConstraint(
podsForEviction := make(map[*v1.Pod]struct{}) podsForEviction := make(map[*v1.Pod]struct{})
// 1. for each namespace... // 1. for each namespace...
for _, namespace := range namespaces.Items { for _, namespace := range namespaces.Items {
if (len(strategyParams.includedNamespaces) > 0 && !strategyParams.includedNamespaces.Has(namespace.Name)) || if (len(strategyParams.IncludedNamespaces) > 0 && !strategyParams.IncludedNamespaces.Has(namespace.Name)) ||
(len(strategyParams.excludedNamespaces) > 0 && strategyParams.excludedNamespaces.Has(namespace.Name)) { (len(strategyParams.ExcludedNamespaces) > 0 && strategyParams.ExcludedNamespaces.Has(namespace.Name)) {
continue continue
} }
namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{}) namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
@@ -186,6 +141,10 @@ func RemovePodsViolatingTopologySpreadConstraint(
// (this loop is where we count the number of pods per topologyValue that match this constraint's selector) // (this loop is where we count the number of pods per topologyValue that match this constraint's selector)
var sumPods float64 var sumPods float64
for i := range namespacePods.Items { for i := range namespacePods.Items {
// skip pods that are being deleted.
if utils.IsPodTerminating(&namespacePods.Items[i]) {
continue
}
// 4. if the pod matches this TopologySpreadConstraint LabelSelector // 4. if the pod matches this TopologySpreadConstraint LabelSelector
if !selector.Matches(labels.Set(namespacePods.Items[i].Labels)) { if !selector.Matches(labels.Set(namespacePods.Items[i].Labels)) {
continue continue
@@ -281,7 +240,7 @@ func balanceDomains(
j := len(sortedDomains) - 1 j := len(sortedDomains) - 1
for i < j { for i < j {
// if j has no more to give without falling below the ideal average, move to next aboveAvg // if j has no more to give without falling below the ideal average, move to next aboveAvg
if float64(len(sortedDomains[j].pods)) < idealAvg { if float64(len(sortedDomains[j].pods)) <= idealAvg {
j-- j--
} }
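Two behavioural points in this hunk are worth spelling out: terminating pods (non-nil DeletionTimestamp) no longer count toward a domain's size, and the donor guard in balanceDomains changes from < to <=, so a domain holding exactly the ideal average is never drained. That is what the new test cases check: [8 7 0] still rebalances by moving 5 pods toward [5 5 5], while [5 5 5] stays untouched. A small standalone sketch of that arithmetic (not the actual balanceDomains implementation; the donors helper is illustrative):

package main

import "fmt"

// donors returns the indices of domains that hold strictly more pods than the
// ideal average and can therefore give pods away, mirroring the new <= guard.
func donors(sizes []int) []int {
	total := 0
	for _, s := range sizes {
		total += s
	}
	idealAvg := float64(total) / float64(len(sizes))
	var out []int
	for i, s := range sizes {
		if float64(s) > idealAvg {
			out = append(out, i)
		}
	}
	return out
}

func main() {
	fmt.Println(donors([]int{8, 7, 0})) // [0 1]: both oversized domains shed pods toward 5
	fmt.Println(donors([]int{5, 5, 5})) // []: already balanced, nothing is evicted
}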


@@ -5,23 +5,23 @@ import (
"fmt" "fmt"
"testing" "testing"
"sigs.k8s.io/descheduler/pkg/api"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
func TestTopologySpreadConstraint(t *testing.T) { func TestTopologySpreadConstraint(t *testing.T) {
ctx := context.Background()
testCases := []struct { testCases := []struct {
name string name string
pods []*v1.Pod pods []*v1.Pod
expectedEvictedCount int expectedEvictedCount uint
nodes []*v1.Node nodes []*v1.Node
strategy api.DeschedulerStrategy strategy api.DeschedulerStrategy
namespaces []string namespaces []string
@@ -588,6 +588,62 @@ func TestTopologySpreadConstraint(t *testing.T) {
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}}, strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
namespaces: []string{"ns1"}, namespaces: []string{"ns1"},
}, },
{
name: "3 domains size [8 7 0], maxSkew=1, should move 5 to get [5 5 5]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
},
pods: createTestPods([]testPodList{
{
count: 8,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 7,
node: "n2",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
}),
expectedEvictedCount: 5,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "3 domains size [5 5 5], maxSkew=1, should move 0 to retain [5 5 5]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
},
pods: createTestPods([]testPodList{
{
count: 5,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 5,
node: "n2",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 5,
node: "n3",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{ {
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod since pod tolerates the node with taint", name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod since pod tolerates the node with taint",
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -776,33 +832,83 @@ func TestTopologySpreadConstraint(t *testing.T) {
}, },
namespaces: []string{"ns1"}, namespaces: []string{"ns1"},
}, },
{
name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 2,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 2,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
deletionTimestamp: &metav1.Time{},
},
{
count: 2,
node: "n2",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
},
},
namespaces: []string{"ns1"},
},
} }
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
fakeClient := &fake.Clientset{} ctx, cancel := context.WithCancel(context.Background())
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { defer cancel()
podList := make([]v1.Pod, 0, len(tc.pods))
for _, pod := range tc.pods { var objs []runtime.Object
podList = append(podList, *pod) for _, node := range tc.nodes {
} objs = append(objs, node)
return true, &v1.PodList{Items: podList}, nil }
}) for _, pod := range tc.pods {
fakeClient.Fake.AddReactor("list", "namespaces", func(action core.Action) (bool, runtime.Object, error) { objs = append(objs, pod)
return true, &v1.NamespaceList{Items: []v1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "ns1", Namespace: "ns1"}}}}, nil }
}) objs = append(objs, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns1"}})
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor( podEvictor := evictions.NewPodEvictor(
fakeClient, fakeClient,
"v1", "v1",
false, false,
100, nil,
nil,
tc.nodes, tc.nodes,
false, false,
false, false,
false, false,
false,
false,
) )
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor) RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted() podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedCount { if podsEvicted != tc.expectedEvictedCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted) t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
@@ -812,14 +918,15 @@ func TestTopologySpreadConstraint(t *testing.T) {
} }
type testPodList struct { type testPodList struct {
count int count int
node string node string
labels map[string]string labels map[string]string
constraints []v1.TopologySpreadConstraint constraints []v1.TopologySpreadConstraint
nodeSelector map[string]string nodeSelector map[string]string
nodeAffinity *v1.Affinity nodeAffinity *v1.Affinity
noOwners bool noOwners bool
tolerations []v1.Toleration tolerations []v1.Toleration
deletionTimestamp *metav1.Time
} }
func createTestPods(testPods []testPodList) []*v1.Pod { func createTestPods(testPods []testPodList) []*v1.Pod {
@@ -840,6 +947,7 @@ func createTestPods(testPods []testPodList) []*v1.Pod {
p.Spec.NodeSelector = tp.nodeSelector p.Spec.NodeSelector = tp.nodeSelector
p.Spec.Affinity = tp.nodeAffinity p.Spec.Affinity = tp.nodeAffinity
p.Spec.Tolerations = tp.tolerations p.Spec.Tolerations = tp.tolerations
p.ObjectMeta.DeletionTimestamp = tp.deletionTimestamp
})) }))
podNum++ podNum++
} }


@@ -0,0 +1,71 @@
package validation
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/utils"
)
// ValidatedStrategyParams contains validated common strategy parameters
type ValidatedStrategyParams struct {
ThresholdPriority int32
IncludedNamespaces sets.String
ExcludedNamespaces sets.String
LabelSelector labels.Selector
NodeFit bool
}
func DefaultValidatedStrategyParams() ValidatedStrategyParams {
return ValidatedStrategyParams{ThresholdPriority: utils.SystemCriticalPriority}
}
func ValidateAndParseStrategyParams(
ctx context.Context,
client clientset.Interface,
params *api.StrategyParameters,
) (*ValidatedStrategyParams, error) {
if params == nil {
defaultValidatedStrategyParams := DefaultValidatedStrategyParams()
return &defaultValidatedStrategyParams, nil
}
// At most one of include/exclude can be set
var includedNamespaces, excludedNamespaces sets.String
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return nil, fmt.Errorf("only one of ThresholdPriority and thresholdPriorityClassName can be set")
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
if err != nil {
return nil, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
}
if params.Namespaces != nil {
includedNamespaces = sets.NewString(params.Namespaces.Include...)
excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
}
var selector labels.Selector
if params.LabelSelector != nil {
selector, err = metav1.LabelSelectorAsSelector(params.LabelSelector)
if err != nil {
return nil, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
}
}
return &ValidatedStrategyParams{
ThresholdPriority: thresholdPriority,
IncludedNamespaces: includedNamespaces,
ExcludedNamespaces: excludedNamespaces,
LabelSelector: selector,
NodeFit: params.NodeFit,
}, nil
}
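The new validation package factors the common parameter checks (namespace include/exclude exclusivity, threshold priority, label selector) out of the individual strategies. A minimal standalone sketch that exercises it against a fake clientset, with illustrative parameter values:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
)

func main() {
	params := &api.StrategyParameters{
		Namespaces:    &api.Namespaces{Exclude: []string{"kube-system"}},
		NodeFit:       true,
		LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
	}
	parsed, err := validation.ValidateAndParseStrategyParams(context.Background(), fake.NewSimpleClientset(), params)
	if err != nil {
		fmt.Println("invalid strategy params:", err)
		return
	}
	fmt.Println("nodeFit:", parsed.NodeFit, "excluded namespaces:", parsed.ExcludedNamespaces.List())
}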


@@ -0,0 +1,79 @@
package validation
import (
"context"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/descheduler/pkg/api"
)
var (
thresholdPriority int32 = 1000
)
func TestValidStrategyParams(t *testing.T) {
ctx := context.Background()
fakeClient := &fake.Clientset{}
testCases := []struct {
name string
params *api.StrategyParameters
}{
{name: "validate nil params", params: nil},
{name: "validate empty params", params: &api.StrategyParameters{}},
{name: "validate params with NodeFit", params: &api.StrategyParameters{NodeFit: true}},
{name: "validate params with ThresholdPriority", params: &api.StrategyParameters{ThresholdPriority: &thresholdPriority}},
{name: "validate params with priorityClassName", params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority"}},
{name: "validate params with excluded namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Exclude: []string{"excluded-ns"}}}},
{name: "validate params with included namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}}}},
{name: "validate params with empty label selector", params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{}}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
if err != nil {
t.Errorf("strategy params should be valid but got err: %v", err.Error())
}
if params == nil {
t.Errorf("strategy params should return a strategyParams but got nil")
}
})
}
}
func TestInvalidStrategyParams(t *testing.T) {
ctx := context.Background()
fakeClient := &fake.Clientset{}
testCases := []struct {
name string
params *api.StrategyParameters
}{
{
name: "invalid params with both included and excluded namespaces nil params",
params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}, Exclude: []string{"exclude-ns"}}},
},
{
name: "invalid params with both threshold priority and priority class name",
params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority", ThresholdPriority: &thresholdPriority},
},
{
name: "invalid params with bad label selector",
params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"": "missing-label"}}},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
if err == nil {
t.Errorf("strategy params should be invalid but did not get err")
}
if params != nil {
t.Errorf("strategy params should return a nil strategyParams but got %v", params)
}
})
}
}


@@ -89,6 +89,11 @@ func IsMirrorPod(pod *v1.Pod) bool {
return ok return ok
} }
// IsPodTerminating returns true if the pod DeletionTimestamp is set.
func IsPodTerminating(pod *v1.Pod) bool {
return pod.DeletionTimestamp != nil
}
// IsStaticPod returns true if the pod is a static pod. // IsStaticPod returns true if the pod is a static pod.
func IsStaticPod(pod *v1.Pod) bool { func IsStaticPod(pod *v1.Pod) bool {
source, err := GetPodSource(pod) source, err := GetPodSource(pod)
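The IsPodTerminating helper added above only inspects DeletionTimestamp, which is how the strategies and tests in this change skip pods that are already being deleted. A tiny standalone sketch:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/descheduler/pkg/utils"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1"}}
	fmt.Println(utils.IsPodTerminating(pod)) // false: no DeletionTimestamp set
	pod.DeletionTimestamp = &metav1.Time{}
	fmt.Println(utils.IsPodTerminating(pod)) // true: deletion is in progress
}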


@@ -0,0 +1,203 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"strings"
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)
func TestRemoveDuplicates(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
t.Log("Creating testing namespace")
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
t.Log("Creating duplicates pods")
deploymentObj := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "duplicate-pod",
Namespace: testNamespace.Name,
Labels: map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"},
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Always",
Image: "kubernetes/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
}},
},
},
},
}
tests := []struct {
description string
replicasNum int
beforeFunc func(deployment *appsv1.Deployment)
expectedEvictedPodCount uint
}{
{
description: "Evict Pod even Pods schedule to specific node",
replicasNum: 4,
beforeFunc: func(deployment *appsv1.Deployment) {
deployment.Spec.Replicas = func(i int32) *int32 { return &i }(4)
deployment.Spec.Template.Spec.NodeName = workerNodes[0].Name
},
expectedEvictedPodCount: 2,
},
{
description: "Evict Pod even Pods with local storage",
replicasNum: 5,
beforeFunc: func(deployment *appsv1.Deployment) {
deployment.Spec.Replicas = func(i int32) *int32 { return &i }(5)
deployment.Spec.Template.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
},
expectedEvictedPodCount: 2,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
t.Logf("Creating deployment %v in %v namespace", deploymentObj.Name, deploymentObj.Namespace)
tc.beforeFunc(deploymentObj)
_, err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).Create(ctx, deploymentObj, metav1.CreateOptions{})
if err != nil {
t.Logf("Error creating deployment: %v", err)
if err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"})).String(),
}); err != nil {
t.Fatalf("Unable to delete deployment: %v", err)
}
return
}
defer clientSet.AppsV1().Deployments(deploymentObj.Namespace).Delete(ctx, deploymentObj.Name, metav1.DeleteOptions{})
waitForPodsRunning(ctx, t, clientSet, map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"}, tc.replicasNum, testNamespace.Name)
// Run DeschedulerStrategy strategy
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("Error creating eviction policy group %v", err)
}
podEvictor := evictions.NewPodEvictor(
clientSet,
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
true,
false,
false,
false,
false,
)
t.Log("Running DeschedulerStrategy strategy")
strategies.RemoveDuplicatePods(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
RemoveDuplicates: &deschedulerapi.RemoveDuplicates{},
},
},
workerNodes,
podEvictor,
getPodsAssignedToNode,
)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Unexpected number of pods have been evicted, got %v, expected %v", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
})
}
}
func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
if err := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelMap).String(),
})
if err != nil {
return false, err
}
if len(podList.Items) != desireRunningPodNum {
t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))
return false, nil
}
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning {
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods running: %v", err)
}
}


@@ -0,0 +1,156 @@
package e2e
import (
"context"
"strings"
"testing"
"time"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)
var oneHourPodLifetimeSeconds uint = 3600
func TestFailedPods(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
nodes, _ := splitNodesAndWorkerNodes(nodeList.Items)
t.Log("Creating testing namespace")
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
testCases := map[string]struct {
expectedEvictedCount uint
strategyParams *deschedulerapi.StrategyParameters
}{
"test-failed-pods-nil-strategy": {
expectedEvictedCount: 1,
strategyParams: nil,
},
"test-failed-pods-default-strategy": {
expectedEvictedCount: 1,
strategyParams: &deschedulerapi.StrategyParameters{},
},
"test-failed-pods-default-failed-pods": {
expectedEvictedCount: 1,
strategyParams: &deschedulerapi.StrategyParameters{
FailedPods: &deschedulerapi.FailedPods{},
},
},
"test-failed-pods-reason-unmatched": {
expectedEvictedCount: 0,
strategyParams: &deschedulerapi.StrategyParameters{
FailedPods: &deschedulerapi.FailedPods{Reasons: []string{"ReasonDoesNotMatch"}},
},
},
"test-failed-pods-min-age-unmet": {
expectedEvictedCount: 0,
strategyParams: &deschedulerapi.StrategyParameters{
FailedPods: &deschedulerapi.FailedPods{MinPodLifetimeSeconds: &oneHourPodLifetimeSeconds},
},
},
"test-failed-pods-exclude-job-kind": {
expectedEvictedCount: 0,
strategyParams: &deschedulerapi.StrategyParameters{
FailedPods: &deschedulerapi.FailedPods{ExcludeOwnerKinds: []string{"Job"}},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
job := initFailedJob(name, testNamespace.Namespace)
t.Logf("Creating job %s in %s namespace", job.Name, job.Namespace)
jobClient := clientSet.BatchV1().Jobs(testNamespace.Name)
if _, err := jobClient.Create(ctx, job, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating Job %s: %v", name, err)
}
deletePropagationPolicy := metav1.DeletePropagationForeground
defer jobClient.Delete(ctx, job.Name, metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy})
waitForJobPodPhase(ctx, t, clientSet, job, v1.PodFailed)
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
t.Logf("Running RemoveFailedPods strategy for %s", name)
strategies.RemoveFailedPods(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: tc.strategyParams,
},
nodes,
podEvictor,
getPodsAssignedToNode,
)
t.Logf("Finished RemoveFailedPods strategy for %s", name)
if actualEvictedCount := podEvictor.TotalEvicted(); actualEvictedCount == tc.expectedEvictedCount {
t.Logf("Total of %d Pods were evicted for %s", actualEvictedCount, name)
} else {
t.Errorf("Unexpected number of pods have been evicted, got %v, expected %v", actualEvictedCount, tc.expectedEvictedCount)
}
})
}
}
func initFailedJob(name, namespace string) *batchv1.Job {
podSpec := MakePodSpec("", nil)
podSpec.Containers[0].Command = []string{"/bin/false"}
podSpec.RestartPolicy = v1.RestartPolicyNever
labelsSet := labels.Set{"test": name, "name": name}
jobBackoffLimit := int32(0)
return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Labels: labelsSet,
Name: name,
Namespace: namespace,
},
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
Spec: podSpec,
ObjectMeta: metav1.ObjectMeta{Labels: labelsSet},
},
BackoffLimit: &jobBackoffLimit,
},
}
}
func waitForJobPodPhase(ctx context.Context, t *testing.T, clientSet clientset.Interface, job *batchv1.Job, phase v1.PodPhase) {
podClient := clientSet.CoreV1().Pods(job.Namespace)
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
t.Log(labels.FormatLabels(job.Labels))
if podList, err := podClient.List(ctx, metav1.ListOptions{LabelSelector: labels.FormatLabels(job.Labels)}); err != nil {
return false, err
} else {
if len(podList.Items) == 0 {
t.Logf("Job controller has not created Pod for job %s yet", job.Name)
return false, nil
}
for _, pod := range podList.Items {
if pod.Status.Phase != phase {
t.Logf("Pod %v not in %s phase yet, is %v instead", pod.Name, phase, pod.Status.Phase)
return false, nil
}
}
t.Logf("Job %v Pod is in %s phase now", job.Name, phase)
return true, nil
}
}); err != nil {
t.Fatalf("Error waiting for pods in %s phase: %v", phase, err)
}
}


@@ -62,11 +62,11 @@ func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
Resources: v1.ResourceRequirements{ Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{ Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"), v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("1000Mi"), v1.ResourceMemory: resource.MustParse("200Mi"),
}, },
Requests: v1.ResourceList{ Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"), v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("800Mi"), v1.ResourceMemory: resource.MustParse("100Mi"),
}, },
}, },
}}, }},
@@ -75,7 +75,7 @@ func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
} }
} }
// RcByNameContainer returns a ReplicationControoler with specified name and container // RcByNameContainer returns a ReplicationController with specified name and container
func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64, priorityClassName string) *v1.ReplicationController { func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64, priorityClassName string) *v1.ReplicationController {
zeroGracePeriod := int64(0) zeroGracePeriod := int64(0)
@@ -108,7 +108,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
} }
} }
func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, chan struct{}) { func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, podutil.GetPodsAssignedToNodeFunc, chan struct{}) {
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG")) clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
if err != nil { if err != nil {
t.Errorf("Error during client creation with %v", err) t.Errorf("Error during client creation with %v", err)
@@ -117,12 +117,41 @@ func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInfo
stopChannel := make(chan struct{}) stopChannel := make(chan struct{})
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0) sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(stopChannel) sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel) sharedInformerFactory.WaitForCacheSync(stopChannel)
nodeInformer := sharedInformerFactory.Core().V1().Nodes() waitForNodesReady(context.Background(), t, clientSet, nodeInformer)
return clientSet, nodeInformer, getPodsAssignedToNode, stopChannel
}
return clientSet, nodeInformer, stopChannel func waitForNodesReady(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer) {
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
readyNodes, err := nodeutil.ReadyNodes(ctx, clientSet, nodeInformer, "")
if err != nil {
return false, err
}
if len(nodeList.Items) != len(readyNodes) {
t.Logf("%v/%v nodes are ready. Waiting for all nodes to be ready...", len(readyNodes), len(nodeList.Items))
return false, nil
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for nodes to be ready: %v", err)
}
} }
func runPodLifetimeStrategy( func runPodLifetimeStrategy(
@@ -135,6 +164,7 @@ func runPodLifetimeStrategy(
priority *int32, priority *int32,
evictCritical bool, evictCritical bool,
labelSelector *metav1.LabelSelector, labelSelector *metav1.LabelSelector,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) { ) {
// Run descheduler. // Run descheduler.
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset) evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
@@ -166,12 +196,16 @@ func runPodLifetimeStrategy(
clientset, clientset,
evictionPolicyGroupVersion, evictionPolicyGroupVersion,
false, false,
0, nil,
nil,
nodes, nodes,
false, false,
evictCritical, evictCritical,
false, false,
false,
false,
), ),
getPodsAssignedToNode,
) )
} }
@@ -201,7 +235,7 @@ func intersectStrings(lista, listb []string) []string {
func TestLowNodeUtilization(t *testing.T) { func TestLowNodeUtilization(t *testing.T) {
ctx := context.Background() ctx := context.Background()
clientSet, _, stopCh := initializeClient(t) clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh) defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -209,15 +243,7 @@ func TestLowNodeUtilization(t *testing.T) {
t.Errorf("Error listing node with %v", err) t.Errorf("Error listing node with %v", err)
} }
var nodes []*v1.Node nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
var workerNodes []*v1.Node
for i := range nodeList.Items {
node := nodeList.Items[i]
nodes = append(nodes, &node)
if _, exists := node.Labels["node-role.kubernetes.io/master"]; !exists {
workerNodes = append(workerNodes, &node)
}
}
t.Log("Creating testing namespace") t.Log("Creating testing namespace")
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -298,22 +324,14 @@ func TestLowNodeUtilization(t *testing.T) {
waitForRCPodsRunning(ctx, t, clientSet, rc) waitForRCPodsRunning(ctx, t, clientSet, rc)
// Run LowNodeUtilization strategy // Run LowNodeUtilization strategy
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet) podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("%v", err)
}
podEvictor := evictions.NewPodEvictor(
clientSet,
evictionPolicyGroupVersion,
false,
0,
nodes,
true,
false,
false,
)
podsOnMosttUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable)) podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
}
podsOnMosttUtilizedNode, err := podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
if err != nil { if err != nil {
t.Errorf("Error listing pods on a node %v", err) t.Errorf("Error listing pods on a node %v", err)
} }
@@ -338,11 +356,17 @@ func TestLowNodeUtilization(t *testing.T) {
}, },
workerNodes, workerNodes,
podEvictor, podEvictor,
getPodsAssignedToNode,
) )
waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace) waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
podsOnMosttUtilizedNode, err = podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable)) podFilter, err = podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
}
podsOnMosttUtilizedNode, err = podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
if err != nil { if err != nil {
t.Errorf("Error listing pods on a node %v", err) t.Errorf("Error listing pods on a node %v", err)
} }
@@ -359,7 +383,7 @@ func TestLowNodeUtilization(t *testing.T) {
func TestNamespaceConstraintsInclude(t *testing.T) { func TestNamespaceConstraintsInclude(t *testing.T) {
ctx := context.Background() ctx := context.Background()
clientSet, nodeInformer, stopCh := initializeClient(t) clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh) defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -394,7 +418,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace) t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{ runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Include: []string{rc.Namespace}, Include: []string{rc.Namespace},
}, "", nil, false, nil) }, "", nil, false, nil, getPodsAssignedToNode)
// All pods are supposed to be deleted, wait until all the old pods are deleted // All pods are supposed to be deleted, wait until all the old pods are deleted
if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) { if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
@@ -430,7 +454,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
func TestNamespaceConstraintsExclude(t *testing.T) { func TestNamespaceConstraintsExclude(t *testing.T) {
ctx := context.Background() ctx := context.Background()
clientSet, nodeInformer, stopCh := initializeClient(t) clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh) defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -465,7 +489,7 @@ func TestNamespaceConstraintsExclude(t *testing.T) {
t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace) t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{ runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Exclude: []string{rc.Namespace}, Exclude: []string{rc.Namespace},
}, "", nil, false, nil) }, "", nil, false, nil, getPodsAssignedToNode)
t.Logf("Waiting 10s") t.Logf("Waiting 10s")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
@@ -497,7 +521,7 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
var lowPriority = int32(500) var lowPriority = int32(500)
ctx := context.Background() ctx := context.Background()
clientSet, nodeInformer, stopCh := initializeClient(t) clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh) defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -578,9 +602,9 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
t.Logf("Existing pods: %v", initialPodNames) t.Logf("Existing pods: %v", initialPodNames)
if isPriorityClass { if isPriorityClass {
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil) runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil, getPodsAssignedToNode)
} else { } else {
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil) runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil, getPodsAssignedToNode)
} }
// All pods are supposed to be deleted, wait until all pods in the test namespace are terminating // All pods are supposed to be deleted, wait until all pods in the test namespace are terminating
@@ -627,7 +651,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
var lowPriority = int32(500) var lowPriority = int32(500)
ctx := context.Background() ctx := context.Background()
clientSet, nodeInformer, stopCh := initializeClient(t) clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh) defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -697,10 +721,10 @@ func testPriority(t *testing.T, isPriorityClass bool) {
if isPriorityClass { if isPriorityClass {
t.Logf("set the strategy to delete pods with priority lower than priority class %s", highPriorityClass.Name) t.Logf("set the strategy to delete pods with priority lower than priority class %s", highPriorityClass.Name)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil) runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil, getPodsAssignedToNode)
} else { } else {
t.Logf("set the strategy to delete pods with priority lower than %d", highPriority) t.Logf("set the strategy to delete pods with priority lower than %d", highPriority)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil) runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil, getPodsAssignedToNode)
} }
t.Logf("Waiting 10s") t.Logf("Waiting 10s")
@@ -756,7 +780,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
func TestPodLabelSelector(t *testing.T) { func TestPodLabelSelector(t *testing.T) {
ctx := context.Background() ctx := context.Background()
clientSet, nodeInformer, stopCh := initializeClient(t) clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh) defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -804,7 +828,7 @@ func TestPodLabelSelector(t *testing.T) {
t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames) t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames)
t.Logf("set the strategy to delete pods with label test:podlifetime-evict") t.Logf("set the strategy to delete pods with label test:podlifetime-evict")
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}) runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode)
t.Logf("Waiting 10s") t.Logf("Waiting 10s")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
@@ -859,7 +883,7 @@ func TestPodLabelSelector(t *testing.T) {
func TestEvictAnnotation(t *testing.T) { func TestEvictAnnotation(t *testing.T) {
ctx := context.Background() ctx := context.Background()
clientSet, nodeInformer, stopCh := initializeClient(t) clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh) defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -914,7 +938,7 @@ func TestEvictAnnotation(t *testing.T) {
t.Logf("Existing pods: %v", initialPodNames) t.Logf("Existing pods: %v", initialPodNames)
t.Log("Running PodLifetime strategy") t.Log("Running PodLifetime strategy")
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil) runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, getPodsAssignedToNode)
if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) { if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()}) podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
@@ -1288,3 +1312,36 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
t.Fatalf("Error waiting for pod running: %v", err) t.Fatalf("Error waiting for pod running: %v", err)
} }
} }
func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
var allNodes []*v1.Node
var workerNodes []*v1.Node
for i := range nodes {
node := nodes[i]
allNodes = append(allNodes, &node)
if _, exists := node.Labels["node-role.kubernetes.io/master"]; !exists {
workerNodes = append(workerNodes, &node)
}
}
return allNodes, workerNodes
}
func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*v1.Node) *evictions.PodEvictor {
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("Error creating eviction policy group: %v", err)
}
return evictions.NewPodEvictor(
clientSet,
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
true,
false,
false,
false,
false,
)
}
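
The helpers above capture the core of this refactor: the tests now look pods up through the shared informer index (getPodsAssignedToNode) instead of issuing per-node API calls. A minimal sketch of that pattern, using only the podutil and evictions helpers already shown in this diff (import paths follow the repository layout):

    package e2e

    import (
        v1 "k8s.io/api/core/v1"

        "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
        podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    )

    // listEvictablePodsOnNode shows the indexer-backed listing pattern used above:
    // build the filter once, then look pods up by node name without an API round trip.
    func listEvictablePodsOnNode(
        nodeName string,
        getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
        podEvictor *evictions.PodEvictor,
    ) ([]*v1.Pod, error) {
        podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
        if err != nil {
            return nil, err
        }
        return podutil.ListPodsOnANode(nodeName, getPodsAssignedToNode, podFilter)
    }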

View File

@@ -0,0 +1,196 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"strings"
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)
func TestTooManyRestarts(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
t.Logf("Creating testing namespace %v", t.Name())
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
deploymentObj := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "restart-pod",
Namespace: testNamespace.Name,
Labels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
},
Spec: appsv1.DeploymentSpec{
Replicas: func(i int32) *int32 { return &i }(4),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Always",
Image: "kubernetes/pause",
Command: []string{"/bin/sh"},
Args: []string{"-c", "sleep 1s && exit 1"},
Ports: []v1.ContainerPort{{ContainerPort: 80}},
}},
},
},
},
}
t.Logf("Creating deployment %v", deploymentObj.Name)
_, err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).Create(ctx, deploymentObj, metav1.CreateOptions{})
if err != nil {
t.Logf("Error creating deployment: %v", err)
if err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"})).String(),
}); err != nil {
t.Fatalf("Unable to delete deployment: %v", err)
}
return
}
defer clientSet.AppsV1().Deployments(deploymentObj.Namespace).Delete(ctx, deploymentObj.Name, metav1.DeleteOptions{})
// Wait until each pod's restartCount reaches at least 4
result, err := waitPodRestartCount(ctx, clientSet, testNamespace.Name, t)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !result {
t.Fatal("Pod restart count not as expected")
}
tests := []struct {
name string
podRestartThreshold int32
includingInitContainers bool
expectedEvictedPodCount uint
}{
{
name: "test-no-evictions",
podRestartThreshold: int32(10000),
includingInitContainers: true,
expectedEvictedPodCount: 0,
},
{
name: "test-one-evictions",
podRestartThreshold: int32(4),
includingInitContainers: true,
expectedEvictedPodCount: 4,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("Error creating eviction policy group: %v", err)
}
podEvictor := evictions.NewPodEvictor(
clientSet,
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
true,
false,
false,
false,
false,
)
// Run RemovePodsHavingTooManyRestarts strategy
t.Log("Running RemovePodsHavingTooManyRestarts strategy")
strategies.RemovePodsHavingTooManyRestarts(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
PodsHavingTooManyRestarts: &deschedulerapi.PodsHavingTooManyRestarts{
PodRestartThreshold: tc.podRestartThreshold,
IncludingInitContainers: tc.includingInitContainers,
},
},
},
workerNodes,
podEvictor,
getPodsAssignedToNode,
)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount < tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Unexpected number of pods have been evicted, got %v, expected %v", tc.name, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
})
}
}
func waitPodRestartCount(ctx context.Context, clientSet clientset.Interface, namespace string, t *testing.T) (bool, error) {
timeout := time.After(5 * time.Minute)
tick := time.Tick(5 * time.Second)
for {
select {
case <-timeout:
t.Log("Timeout, still restart count not as expected")
return false, fmt.Errorf("timeout Error")
case <-tick:
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"})).String(),
})
// Check the List error before indexing into the pod list.
if err != nil {
t.Fatalf("Unexpected err: %v", err)
return false, err
}
if podList.Items[0].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[1].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[2].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[3].Status.ContainerStatuses[0].RestartCount >= 4 {
t.Log("Pod restartCount as expected")
return true, nil
}
}
}
}
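
The ticker loop above could equally be written with the wait.PollImmediate helper used elsewhere in these e2e tests; a sketch under that assumption, with the label selector and thresholds copied from the diff:

    package e2e

    import (
        "context"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/labels"
        "k8s.io/apimachinery/pkg/util/wait"
        clientset "k8s.io/client-go/kubernetes"
    )

    // waitForRestartCount polls until every pod of the test deployment has restarted
    // at least four times, or the five-minute budget is exhausted.
    func waitForRestartCount(ctx context.Context, clientSet clientset.Interface, namespace string) error {
        selector := labels.SelectorFromSet(labels.Set(map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"})).String()
        return wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
            podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
            if err != nil {
                return false, err
            }
            if len(podList.Items) < 4 {
                return false, nil
            }
            for _, pod := range podList.Items {
                if len(pod.Status.ContainerStatuses) == 0 || pod.Status.ContainerStatuses[0].RestartCount < 4 {
                    return false, nil
                }
            }
            return true, nil
        })
    }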

View File

@@ -0,0 +1,159 @@
package e2e
import (
"context"
"fmt"
"math"
"strings"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)
const zoneTopologyKey string = "topology.kubernetes.io/zone"
func TestTopologySpreadConstraint(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
t.Log("Creating testing namespace")
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
testCases := map[string]struct {
replicaCount int
maxSkew int
labelKey string
labelValue string
constraint v1.UnsatisfiableConstraintAction
}{
"test-rc-topology-spread-hard-constraint": {
replicaCount: 4,
maxSkew: 1,
labelKey: "test",
labelValue: "topology-spread-hard-constraint",
constraint: v1.DoNotSchedule,
},
"test-rc-topology-spread-soft-constraint": {
replicaCount: 4,
maxSkew: 1,
labelKey: "test",
labelValue: "topology-spread-soft-constraint",
constraint: v1.ScheduleAnyway,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
t.Logf("Creating RC %s with %d replicas", name, tc.replicaCount)
rc := RcByNameContainer(name, testNamespace.Name, int32(tc.replicaCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating RC %s %v", name, err)
}
defer deleteRC(ctx, t, clientSet, rc)
waitForRCPodsRunning(ctx, t, clientSet, rc)
// Create a "Violator" RC that has the same label and is forced to be on the same node using a nodeSelector
violatorRcName := name + "-violator"
violatorCount := tc.maxSkew + 1
violatorRc := RcByNameContainer(violatorRcName, testNamespace.Name, int32(violatorCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
violatorRc.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, violatorRc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating RC %s: %v", violatorRcName, err)
}
defer deleteRC(ctx, t, clientSet, violatorRc)
waitForRCPodsRunning(ctx, t, clientSet, violatorRc)
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
// Run TopologySpreadConstraint strategy
t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
strategies.RemovePodsViolatingTopologySpreadConstraint(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
IncludeSoftConstraints: tc.constraint != v1.DoNotSchedule,
},
},
nodes,
podEvictor,
getPodsAssignedToNode,
)
t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
t.Logf("Wait for terminating pods of %s to disappear", name)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
if totalEvicted := podEvictor.TotalEvicted(); totalEvicted > 0 {
t.Logf("Total of %d Pods were evicted for %s", totalEvicted, name)
} else {
t.Fatalf("Pods were not evicted for %s TopologySpreadConstraint", name)
}
// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
waitForRCPodsRunning(ctx, t, clientSet, rc)
pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", tc.labelKey, tc.labelValue)})
if err != nil {
t.Errorf("Error listing pods for %s: %v", name, err)
}
nodePodCountMap := make(map[string]int)
for _, pod := range pods.Items {
nodePodCountMap[pod.Spec.NodeName]++
}
if len(nodePodCountMap) != len(workerNodes) {
t.Errorf("%s Pods were scheduled on only '%d' nodes and were not properly distributed on the nodes", name, len(nodePodCountMap))
}
min, max := getMinAndMaxPodDistribution(nodePodCountMap)
if max-min > tc.maxSkew {
t.Errorf("Pod distribution for %s is still violating the max skew of %d as it is %d", name, tc.maxSkew, max-min)
}
t.Logf("Pods for %s were distributed in line with max skew of %d", name, tc.maxSkew)
})
}
}
func makeTopologySpreadConstraints(maxSkew int, labelKey, labelValue string, constraint v1.UnsatisfiableConstraintAction) []v1.TopologySpreadConstraint {
return []v1.TopologySpreadConstraint{
{
MaxSkew: int32(maxSkew),
TopologyKey: zoneTopologyKey,
WhenUnsatisfiable: constraint,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{labelKey: labelValue}},
},
}
}
func getMinAndMaxPodDistribution(nodePodCountMap map[string]int) (int, int) {
min := math.MaxInt32
max := math.MinInt32
for _, podCount := range nodePodCountMap {
if podCount < min {
min = podCount
}
if podCount > max {
max = podCount
}
}
return min, max
}
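
As a quick worked example of the skew check above: with the violator RC pinned to a single zone, the per-node counts can end up as {3, 1, 1}, so max-min = 2, which exceeds a maxSkew of 1 until the surplus pod is evicted. A small self-contained sketch of the same arithmetic (node names and counts are hypothetical):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // Hypothetical per-node pod counts after the violator RC lands on one node.
        nodePodCount := map[string]int{"worker-1": 3, "worker-2": 1, "worker-3": 1}
        min, max := math.MaxInt32, math.MinInt32
        for _, c := range nodePodCount {
            if c < min {
                min = c
            }
            if c > max {
                max = c
            }
        }
        maxSkew := 1
        fmt.Printf("skew=%d violates maxSkew=%d: %v\n", max-min, maxSkew, max-min > maxSkew) // skew=2 violates maxSkew=1: true
    }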

View File

@@ -1,3 +1,5 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors. # Copyright 2017 The Kubernetes Authors.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,8 +14,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
#!/bin/bash
set -x set -x
set -o errexit set -o errexit
set -o nounset set -o nounset

View File

@@ -1,3 +1,5 @@
#!/usr/bin/env bash
# Copyright 2021 The Kubernetes Authors. # Copyright 2021 The Kubernetes Authors.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,12 +14,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
#!/bin/bash
set -o errexit set -o errexit
set -o nounset set -o nounset
set -o pipefail set -o pipefail
K8S_VERSION=${KUBERNETES_VERSION:-v1.21.1} K8S_VERSION=${KUBERNETES_VERSION:-v1.23.0}
IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler} IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler}
IMAGE_TAG=${HELM_IMAGE_TAG:-helm-test} IMAGE_TAG=${HELM_IMAGE_TAG:-helm-test}
CHART_LOCATION=${HELM_CHART_LOCATION:-./charts/descheduler} CHART_LOCATION=${HELM_CHART_LOCATION:-./charts/descheduler}
@@ -28,8 +29,8 @@ mv kind-linux-amd64 kind
export PATH=$PATH:$PWD export PATH=$PATH:$PWD
kind create cluster --image kindest/node:"${K8S_VERSION}" --config=./hack/kind_config.yaml kind create cluster --image kindest/node:"${K8S_VERSION}" --config=./hack/kind_config.yaml
kind load docker-image descheduler:helm-test kind load docker-image descheduler:helm-test
helm install descheduler-ci --set image.repository="${IMAGE_REPO}",image.tag="${IMAGE_TAG}" --namespace kube-system "${CHART_LOCATION}" helm install descheduler-ci --set image.repository="${IMAGE_REPO}",image.tag="${IMAGE_TAG}",schedule="* * * * *" --namespace kube-system "${CHART_LOCATION}"
sleep 20s sleep 61 # sleep until Job is triggered
helm test descheduler-ci --namespace kube-system helm test descheduler-ci --namespace kube-system
# Delete kind cluster once test is finished # Delete kind cluster once test is finished

View File

@@ -1,3 +1,5 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors. # Copyright 2017 The Kubernetes Authors.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,8 +14,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
#!/bin/bash
set -x set -x
set -o errexit set -o errexit
set -o nounset set -o nounset

View File

@@ -140,7 +140,7 @@ func testOnGCE() bool {
}() }()
go func() { go func() {
addrs, err := net.LookupHost("metadata.google.internal") addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
if err != nil || len(addrs) == 0 { if err != nil || len(addrs) == 0 {
resc <- false resc <- false
return return
@@ -296,6 +296,7 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
// being stable anyway. // being stable anyway.
host = metadataIP host = metadataIP
} }
suffix = strings.TrimLeft(suffix, "/")
u := "http://" + host + "/computeMetadata/v1/" + suffix u := "http://" + host + "/computeMetadata/v1/" + suffix
req, err := http.NewRequest("GET", u, nil) req, err := http.NewRequest("GET", u, nil)
if err != nil { if err != nil {

View File

@@ -1,12 +0,0 @@
module github.com/Azure/go-autorest/autorest/adal
go 1.12
require (
github.com/Azure/go-autorest v14.2.0+incompatible
github.com/Azure/go-autorest/autorest/date v0.3.0
github.com/Azure/go-autorest/autorest/mocks v0.4.1
github.com/Azure/go-autorest/tracing v0.6.0
github.com/form3tech-oss/jwt-go v3.2.2+incompatible
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
)

View File

@@ -1,19 +0,0 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@@ -28,6 +28,7 @@ const (
mimeTypeFormPost = "application/x-www-form-urlencoded" mimeTypeFormPost = "application/x-www-form-urlencoded"
) )
// DO NOT ACCESS THIS DIRECTLY. go through sender()
var defaultSender Sender var defaultSender Sender
var defaultSenderInit = &sync.Once{} var defaultSenderInit = &sync.Once{}

View File

@@ -30,11 +30,13 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/Azure/go-autorest/autorest/date" "github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/logger"
"github.com/form3tech-oss/jwt-go" "github.com/form3tech-oss/jwt-go"
) )
@@ -69,13 +71,22 @@ const (
defaultMaxMSIRefreshAttempts = 5 defaultMaxMSIRefreshAttempts = 5
// asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions // asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
asMSIEndpointEnv = "MSI_ENDPOINT" msiEndpointEnv = "MSI_ENDPOINT"
// asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions // asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions
asMSISecretEnv = "MSI_SECRET" msiSecretEnv = "MSI_SECRET"
// the API version to use for the App Service MSI endpoint // the API version to use for the legacy App Service MSI endpoint
appServiceAPIVersion = "2017-09-01" appServiceAPIVersion2017 = "2017-09-01"
// secret header used when authenticating against app service MSI endpoint
secretHeader = "Secret"
// the format for expires_on in UTC with AM/PM
expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00"
// the format for expires_on in UTC without AM/PM
expiresOnDateFormat = "1/2/2006 15:04:05 +00:00"
) )
// OAuthTokenProvider is an interface which should be implemented by an access token retriever // OAuthTokenProvider is an interface which should be implemented by an access token retriever
@@ -282,6 +293,8 @@ func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) {
// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. // ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
type ServicePrincipalMSISecret struct { type ServicePrincipalMSISecret struct {
msiType msiType
clientResourceID string
} }
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. // SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
@@ -652,94 +665,173 @@ func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clie
) )
} }
type msiType int
const (
msiTypeUnavailable msiType = iota
msiTypeAppServiceV20170901
msiTypeCloudShell
msiTypeIMDS
)
func (m msiType) String() string {
switch m {
case msiTypeUnavailable:
return "unavailable"
case msiTypeAppServiceV20170901:
return "AppServiceV20170901"
case msiTypeCloudShell:
return "CloudShell"
case msiTypeIMDS:
return "IMDS"
default:
return fmt.Sprintf("unhandled MSI type %d", m)
}
}
// returns the MSI type and endpoint, or an error
func getMSIType() (msiType, string, error) {
if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" {
// if the env var MSI_ENDPOINT is set
if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" {
// if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService
return msiTypeAppServiceV20170901, endpointEnvVar, nil
}
// if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell
return msiTypeCloudShell, endpointEnvVar, nil
} else if msiAvailableHook(context.Background(), sender()) {
// if MSI_ENDPOINT is NOT set AND the IMDS endpoint is available the msiType is IMDS. This will timeout after 500 milliseconds
return msiTypeIMDS, msiEndpoint, nil
} else {
// if MSI_ENDPOINT is NOT set and IMDS endpoint is not available Managed Identity is not available
return msiTypeUnavailable, "", errors.New("MSI not available")
}
}
// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. // GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
// NOTE: this always returns the IMDS endpoint, it does not work for app services or cloud shell.
// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
func GetMSIVMEndpoint() (string, error) { func GetMSIVMEndpoint() (string, error) {
return msiEndpoint, nil return msiEndpoint, nil
} }
// NOTE: this only indicates if the ASE environment credentials have been set // GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions.
// which does not necessarily mean that the caller is authenticating via ASE! // It will return an error when not running in an app service/functions environment.
func isAppService() bool { // Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
_, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
_, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv)
return asMSIEndpointEnvExists && asMSISecretEnvExists
}
// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions
func GetMSIAppServiceEndpoint() (string, error) { func GetMSIAppServiceEndpoint() (string, error) {
asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) msiType, endpoint, err := getMSIType()
if err != nil {
if asMSIEndpointEnvExists { return "", err
return asMSIEndpoint, nil }
switch msiType {
case msiTypeAppServiceV20170901:
return endpoint, nil
default:
return "", fmt.Errorf("%s is not app service environment", msiType)
} }
return "", errors.New("MSI endpoint not found")
} }
// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment // GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment
// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
func GetMSIEndpoint() (string, error) { func GetMSIEndpoint() (string, error) {
if isAppService() { _, endpoint, err := getMSIType()
return GetMSIAppServiceEndpoint() return endpoint, err
}
return GetMSIVMEndpoint()
} }
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. // NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the system assigned identity when creating the token. // It will use the system assigned identity when creating the token.
// msiEndpoint - empty string, or pass a non-empty string to override the default value.
// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, nil, callbacks...) return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...)
} }
// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. // NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the clientID of specified user assigned identity when creating the token. // It will use the clientID of specified user assigned identity when creating the token.
// msiEndpoint - empty string, or pass a non-empty string to override the default value.
// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, nil, callbacks...) if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil {
return nil, err
}
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...)
} }
// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension. // NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the azure resource id of user assigned identity when creating the token. // It will use the azure resource id of user assigned identity when creating the token.
// msiEndpoint - empty string, or pass a non-empty string to override the default value.
// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, &identityResourceID, callbacks...) if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil {
}
func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, identityResourceID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
return nil, err return nil, err
} }
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...)
}
// ManagedIdentityOptions contains optional values for configuring managed identity authentication.
type ManagedIdentityOptions struct {
// ClientID is the user-assigned identity to use during authentication.
// It is mutually exclusive with IdentityResourceID.
ClientID string
// IdentityResourceID is the resource ID of the user-assigned identity to use during authentication.
// It is mutually exclusive with ClientID.
IdentityResourceID string
}
// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity.
// It supports the following managed identity environments.
// - App Service Environment (API version 2017-09-01 only)
// - Cloud shell
// - IMDS with a system or user assigned identity
func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if options == nil {
options = &ManagedIdentityOptions{}
}
return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...)
}
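
A usage sketch of the new constructor; the resource URI and client ID are placeholders, and the refresh call relies on the existing ServicePrincipalToken API:

    package main

    import (
        "context"
        "fmt"

        "github.com/Azure/go-autorest/autorest/adal"
    )

    func main() {
        // Placeholder resource and user-assigned client ID, for illustration only.
        spt, err := adal.NewServicePrincipalTokenFromManagedIdentity(
            "https://management.azure.com/",
            &adal.ManagedIdentityOptions{ClientID: "00000000-0000-0000-0000-000000000000"},
        )
        if err != nil {
            fmt.Println("managed identity not available:", err)
            return
        }
        if err := spt.EnsureFreshWithContext(context.Background()); err != nil {
            fmt.Println("token refresh failed:", err)
            return
        }
        fmt.Println("got a", spt.Token().Type, "token")
    }
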
func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(resource, "resource"); err != nil { if err := validateStringParam(resource, "resource"); err != nil {
return nil, err return nil, err
} }
if userAssignedID != nil { if userAssignedID != "" && identityResourceID != "" {
if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { return nil, errors.New("cannot specify userAssignedID and identityResourceID")
return nil, err
}
} }
if identityResourceID != nil { msiType, endpoint, err := getMSIType()
if err := validateStringParam(*identityResourceID, "identityResourceID"); err != nil { if err != nil {
return nil, err logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v", err)
} return nil, err
} }
// We set the oauth config token endpoint to be MSI's endpoint logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s", msiType, endpoint)
msiEndpointURL, err := url.Parse(msiEndpoint) if msiEndpoint != "" {
endpoint = msiEndpoint
logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s", endpoint)
}
msiEndpointURL, err := url.Parse(endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// cloud shell sends its data in the request body
v := url.Values{} if msiType != msiTypeCloudShell {
v.Set("resource", resource) v := url.Values{}
// App Service MSI currently only supports token API version 2017-09-01 v.Set("resource", resource)
if isAppService() { clientIDParam := "client_id"
v.Set("api-version", appServiceAPIVersion) switch msiType {
} else { case msiTypeAppServiceV20170901:
v.Set("api-version", msiAPIVersion) clientIDParam = "clientid"
v.Set("api-version", appServiceAPIVersion2017)
break
case msiTypeIMDS:
v.Set("api-version", msiAPIVersion)
}
if userAssignedID != "" {
v.Set(clientIDParam, userAssignedID)
} else if identityResourceID != "" {
v.Set("mi_res_id", identityResourceID)
}
msiEndpointURL.RawQuery = v.Encode()
} }
if userAssignedID != nil {
v.Set("client_id", *userAssignedID)
}
if identityResourceID != nil {
v.Set("mi_res_id", *identityResourceID)
}
msiEndpointURL.RawQuery = v.Encode()
spt := &ServicePrincipalToken{ spt := &ServicePrincipalToken{
inner: servicePrincipalToken{ inner: servicePrincipalToken{
@@ -747,10 +839,14 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
OauthConfig: OAuthConfig{ OauthConfig: OAuthConfig{
TokenEndpoint: *msiEndpointURL, TokenEndpoint: *msiEndpointURL,
}, },
Secret: &ServicePrincipalMSISecret{}, Secret: &ServicePrincipalMSISecret{
msiType: msiType,
clientResourceID: identityResourceID,
},
Resource: resource, Resource: resource,
AutoRefresh: true, AutoRefresh: true,
RefreshWithin: defaultRefresh, RefreshWithin: defaultRefresh,
ClientID: userAssignedID,
}, },
refreshLock: &sync.RWMutex{}, refreshLock: &sync.RWMutex{},
sender: sender(), sender: sender(),
@@ -758,10 +854,6 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
} }
if userAssignedID != nil {
spt.inner.ClientID = *userAssignedID
}
return spt, nil return spt, nil
} }
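
For reference, a sketch of the token URL this builds in the IMDS case with a user-assigned identity; the endpoint and api-version literals below are assumptions, since the real msiEndpoint and msiAPIVersion constants are defined elsewhere in the package:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // Assumed values; the real constants live elsewhere in the adal package.
        endpoint := "http://169.254.169.254/metadata/identity/oauth2/token"
        v := url.Values{}
        v.Set("api-version", "2018-02-01")
        v.Set("resource", "https://management.azure.com/")
        v.Set("client_id", "00000000-0000-0000-0000-000000000000")
        u, _ := url.Parse(endpoint)
        u.RawQuery = v.Encode()
        fmt.Println(u.String())
    }
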
@@ -858,31 +950,6 @@ func (spt *ServicePrincipalToken) getGrantType() string {
} }
} }
func isIMDS(u url.URL) bool {
return isMSIEndpoint(u) == true || isASEEndpoint(u) == true
}
func isMSIEndpoint(endpoint url.URL) bool {
msi, err := url.Parse(msiEndpoint)
if err != nil {
return false
}
return endpoint.Host == msi.Host && endpoint.Path == msi.Path
}
func isASEEndpoint(endpoint url.URL) bool {
aseEndpoint, err := GetMSIAppServiceEndpoint()
if err != nil {
// app service environment isn't enabled
return false
}
ase, err := url.Parse(aseEndpoint)
if err != nil {
return false
}
return endpoint.Host == ase.Host && endpoint.Path == ase.Path
}
func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
if spt.customRefreshFunc != nil { if spt.customRefreshFunc != nil {
token, err := spt.customRefreshFunc(ctx, resource) token, err := spt.customRefreshFunc(ctx, resource)
@@ -892,19 +959,45 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
spt.inner.Token = *token spt.inner.Token = *token
return spt.InvokeRefreshCallbacks(spt.inner.Token) return spt.InvokeRefreshCallbacks(spt.inner.Token)
} }
req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
if err != nil { if err != nil {
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
} }
req.Header.Add("User-Agent", UserAgent()) req.Header.Add("User-Agent", UserAgent())
// Add header when runtime is on App Service or Functions
if isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
asMSISecret, _ := os.LookupEnv(asMSISecretEnv)
req.Header.Add("Secret", asMSISecret)
}
req = req.WithContext(ctx) req = req.WithContext(ctx)
if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { var resp *http.Response
authBodyFilter := func(b []byte) []byte {
if logger.Level() != logger.LogAuth {
return []byte("**REDACTED** authentication body")
}
return b
}
if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
switch msiSecret.msiType {
case msiTypeAppServiceV20170901:
req.Method = http.MethodGet
req.Header.Set("secret", os.Getenv(msiSecretEnv))
break
case msiTypeCloudShell:
req.Header.Set("Metadata", "true")
data := url.Values{}
data.Set("resource", spt.inner.Resource)
if spt.inner.ClientID != "" {
data.Set("client_id", spt.inner.ClientID)
} else if msiSecret.clientResourceID != "" {
data.Set("msi_res_id", msiSecret.clientResourceID)
}
req.Body = ioutil.NopCloser(strings.NewReader(data.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
break
case msiTypeIMDS:
req.Method = http.MethodGet
req.Header.Set("Metadata", "true")
break
}
logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
} else {
v := url.Values{} v := url.Values{}
v.Set("client_id", spt.inner.ClientID) v.Set("client_id", spt.inner.ClientID)
v.Set("resource", resource) v.Set("resource", resource)
@@ -933,40 +1026,26 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
req.ContentLength = int64(len(s)) req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost) req.Header.Set(contentType, mimeTypeFormPost)
req.Body = body req.Body = body
} logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
req.Method = http.MethodGet
req.Header.Set(metadataHeader, "true")
}
var resp *http.Response
if isMSIEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
resp, err = getMSIEndpoint(ctx, spt.sender)
if err != nil {
// return a TokenRefreshError here so that we don't keep retrying
return newTokenRefreshError(fmt.Sprintf("the MSI endpoint is not available. Failed HTTP request to MSI endpoint: %v", err), nil)
}
resp.Body.Close()
}
if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
} else {
resp, err = spt.sender.Do(req) resp, err = spt.sender.Do(req)
} }
// don't return a TokenRefreshError here; this will allow retry logic to apply
if err != nil { if err != nil {
// don't return a TokenRefreshError here; this will allow retry logic to apply
return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
} else if resp == nil {
return fmt.Errorf("adal: received nil response and error")
} }
logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter})
defer resp.Body.Close() defer resp.Body.Close()
rb, err := ioutil.ReadAll(resp.Body) rb, err := ioutil.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
if err != nil { if err != nil {
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp)
} }
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp)
} }
// for the following error cases don't return a TokenRefreshError. the operation succeeded // for the following error cases don't return a TokenRefreshError. the operation succeeded
@@ -979,15 +1058,60 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
if len(strings.Trim(string(rb), " ")) == 0 { if len(strings.Trim(string(rb), " ")) == 0 {
return fmt.Errorf("adal: Empty service principal token received during refresh") return fmt.Errorf("adal: Empty service principal token received during refresh")
} }
var token Token token := struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
// AAD returns expires_in as a string, ADFS returns it as an int
ExpiresIn json.Number `json:"expires_in"`
// expires_on can be in two formats, a UTC time stamp or the number of seconds.
ExpiresOn string `json:"expires_on"`
NotBefore json.Number `json:"not_before"`
Resource string `json:"resource"`
Type string `json:"token_type"`
}{}
// return a TokenRefreshError in the follow error cases as the token is in an unexpected format
err = json.Unmarshal(rb, &token) err = json.Unmarshal(rb, &token)
if err != nil { if err != nil {
return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)), resp)
} }
expiresOn := json.Number("")
// ADFS doesn't include the expires_on field
if token.ExpiresOn != "" {
if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil {
return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp)
}
}
spt.inner.Token.AccessToken = token.AccessToken
spt.inner.Token.RefreshToken = token.RefreshToken
spt.inner.Token.ExpiresIn = token.ExpiresIn
spt.inner.Token.ExpiresOn = expiresOn
spt.inner.Token.NotBefore = token.NotBefore
spt.inner.Token.Resource = token.Resource
spt.inner.Token.Type = token.Type
spt.inner.Token = token return spt.InvokeRefreshCallbacks(spt.inner.Token)
}
return spt.InvokeRefreshCallbacks(token) // converts expires_on to the number of seconds
func parseExpiresOn(s string) (json.Number, error) {
// convert the expiration date to the number of seconds from now
timeToDuration := func(t time.Time) json.Number {
dur := t.Sub(time.Now().UTC())
return json.Number(strconv.FormatInt(int64(dur.Round(time.Second).Seconds()), 10))
}
if _, err := strconv.ParseInt(s, 10, 64); err == nil {
// this is the number of seconds case, no conversion required
return json.Number(s), nil
} else if eo, err := time.Parse(expiresOnDateFormatPM, s); err == nil {
return timeToDuration(eo), nil
} else if eo, err := time.Parse(expiresOnDateFormat, s); err == nil {
return timeToDuration(eo), nil
} else {
// unknown format
return json.Number(""), err
}
} }
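
To make the two accepted shapes of expires_on concrete, a self-contained sketch that mirrors the conversion above (it is not the unexported package function itself):

    package main

    import (
        "encoding/json"
        "fmt"
        "strconv"
        "time"
    )

    // toSeconds mirrors parseExpiresOn: numeric values pass through unchanged,
    // App Service style timestamps are converted to seconds from now.
    func toSeconds(s string) (json.Number, error) {
        const layoutPM = "1/2/2006 15:04:05 PM +00:00"
        const layout = "1/2/2006 15:04:05 +00:00"
        if _, err := strconv.ParseInt(s, 10, 64); err == nil {
            return json.Number(s), nil
        }
        for _, l := range []string{layoutPM, layout} {
            if t, err := time.Parse(l, s); err == nil {
                d := t.Sub(time.Now().UTC()).Round(time.Second)
                return json.Number(strconv.FormatInt(int64(d.Seconds()), 10)), nil
            }
        }
        return json.Number(""), fmt.Errorf("unrecognized expires_on format: %q", s)
    }

    func main() {
        fmt.Println(toSeconds("3599"))                          // numeric form passes through
        fmt.Println(toSeconds("12/31/2030 23:59:59 PM +00:00")) // timestamp form is converted
    }
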
// retry logic specific to retrieving a token from the IMDS endpoint // retry logic specific to retrieving a token from the IMDS endpoint
@@ -1118,46 +1242,6 @@ func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
return tokens return tokens
} }
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh primary token: %v", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.EnsureFreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %v", err)
}
}
return nil
}
// RefreshWithContext obtains a fresh token for the Service Principal.
func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh primary token: %v", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %v", err)
}
}
return nil
}
// RefreshExchangeWithContext refreshes the token, but for a different resource.
func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
return fmt.Errorf("failed to refresh primary token: %v", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %v", err)
}
}
return nil
}
// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. // NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.
func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) {
if err := validateStringParam(clientID, "clientID"); err != nil { if err := validateStringParam(clientID, "clientID"); err != nil {
@@ -1188,6 +1272,55 @@ func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig,
return &m, nil return &m, nil
} }
// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource.
func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) {
if err := validateStringParam(clientID, "clientID"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
if certificate == nil {
return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
}
if privateKey == nil {
return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
}
auxTenants := multiTenantCfg.AuxiliaryTenants()
m := MultiTenantServicePrincipalToken{
AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)),
}
primary, err := NewServicePrincipalTokenWithSecret(
*multiTenantCfg.PrimaryTenant(),
clientID,
resource,
&ServicePrincipalCertificateSecret{
PrivateKey: privateKey,
Certificate: certificate,
},
)
if err != nil {
return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err)
}
m.PrimaryToken = primary
for i := range auxTenants {
aux, err := NewServicePrincipalTokenWithSecret(
*auxTenants[i],
clientID,
resource,
&ServicePrincipalCertificateSecret{
PrivateKey: privateKey,
Certificate: certificate,
},
)
if err != nil {
return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err)
}
m.AuxiliaryTokens[i] = aux
}
return &m, nil
}
// MSIAvailable returns true if the MSI endpoint is available for authentication. // MSIAvailable returns true if the MSI endpoint is available for authentication.
func MSIAvailable(ctx context.Context, sender Sender) bool { func MSIAvailable(ctx context.Context, sender Sender) bool {
resp, err := getMSIEndpoint(ctx, sender) resp, err := getMSIEndpoint(ctx, sender)
@@ -1196,3 +1329,8 @@ func MSIAvailable(ctx context.Context, sender Sender) bool {
} }
return err == nil return err == nil
} }
// used for testing purposes
var msiAvailableHook = func(ctx context.Context, sender Sender) bool {
return MSIAvailable(ctx, sender)
}
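
The hook above is the package's test seam; a minimal sketch, assuming it sits in package adal, of stubbing it so the IMDS detection branch can be exercised without network access:

    package adal

    import (
        "context"
        "os"
        "testing"
    )

    func TestGetMSITypeWithStubbedIMDS(t *testing.T) {
        // Make sure the App Service / Cloud Shell env vars do not short-circuit detection.
        os.Unsetenv(msiEndpointEnv)
        os.Unsetenv(msiSecretEnv)

        orig := msiAvailableHook
        msiAvailableHook = func(ctx context.Context, sender Sender) bool { return true }
        defer func() { msiAvailableHook = orig }()

        mt, endpoint, err := getMSIType()
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
        if mt != msiTypeIMDS || endpoint != msiEndpoint {
            t.Fatalf("expected IMDS detection, got %s at %s", mt, endpoint)
        }
    }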

View File

@@ -18,13 +18,12 @@ package adal
import (
	"context"
	"fmt"
	"net/http"
	"time"
)
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
	// this cannot fail, the return sig is due to legacy reasons
	msiEndpoint, _ := GetMSIVMEndpoint()
	tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
	defer cancel()
	// http.NewRequestWithContext() was added in Go 1.13
@@ -34,3 +33,43 @@ func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error)
	req.URL.RawQuery = q.Encode()
	return sender.Do(req)
}
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh primary token: %w", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.EnsureFreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %w", err)
}
}
return nil
}
// RefreshWithContext obtains a fresh token for the Service Principal.
func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh primary token: %w", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %w", err)
}
}
return nil
}
// RefreshExchangeWithContext refreshes the token, but for a different resource.
func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
return fmt.Errorf("failed to refresh primary token: %w", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %w", err)
}
}
return nil
}
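A minimal sketch of calling the refresh helpers before issuing a cross-tenant request. Attaching auxiliary tokens via the x-ms-authorization-auxiliary header mirrors what the autorest authorizers do, but the exact header layout here is an assumption of this sketch:

package example

import (
	"context"
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/autorest/adal"
)

// authorize refreshes every tenant's token and attaches them to the request.
func authorize(ctx context.Context, mt *adal.MultiTenantServicePrincipalToken, req *http.Request) error {
	if err := mt.EnsureFreshWithContext(ctx); err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+mt.PrimaryToken.OAuthToken())
	aux := make([]string, 0, len(mt.AuxiliaryTokens))
	for _, t := range mt.AuxiliaryTokens {
		aux = append(aux, "Bearer "+t.OAuthToken())
	}
	if len(aux) > 0 {
		req.Header.Set("x-ms-authorization-auxiliary", strings.Join(aux, ", "))
	}
	return nil
}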

View File

@@ -23,8 +23,6 @@ import (
)
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
// this cannot fail, the return sig is due to legacy reasons
msiEndpoint, _ := GetMSIVMEndpoint()
	tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
	defer cancel()
	req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil)
@@ -34,3 +32,43 @@ func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error)
	req.URL.RawQuery = q.Encode()
	return sender.Do(req)
}
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
return err
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.EnsureFreshWithContext(ctx); err != nil {
return err
}
}
return nil
}
// RefreshWithContext obtains a fresh token for the Service Principal.
func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
return err
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshWithContext(ctx); err != nil {
return err
}
}
return nil
}
// RefreshExchangeWithContext refreshes the token, but for a different resource.
func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
return err
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
return err
}
}
return nil
}

View File

@@ -42,6 +42,52 @@ const (
var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK}
// FutureAPI contains the set of methods on the Future type.
type FutureAPI interface {
// Response returns the last HTTP response.
Response() *http.Response
// Status returns the last status message of the operation.
Status() string
// PollingMethod returns the method used to monitor the status of the asynchronous operation.
PollingMethod() PollingMethodType
// DoneWithContext queries the service to see if the operation has completed.
DoneWithContext(context.Context, autorest.Sender) (bool, error)
// GetPollingDelay returns a duration the application should wait before checking
// the status of the asynchronous request and true; this value is returned from
// the service via the Retry-After response header. If the header wasn't returned
// then the function returns the zero-value time.Duration and false.
GetPollingDelay() (time.Duration, bool)
// WaitForCompletionRef will return when one of the following conditions is met: the long
// running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts.
// If no deadline is specified in the context then the client.PollingDuration will be
// used to determine if a default deadline should be used.
// If PollingDuration is greater than zero the value will be used as the context's timeout.
// If PollingDuration is zero then no default deadline will be used.
WaitForCompletionRef(context.Context, autorest.Client) error
// MarshalJSON implements the json.Marshaler interface.
MarshalJSON() ([]byte, error)
// UnmarshalJSON implements the json.Unmarshaler interface.
UnmarshalJSON([]byte) error
// PollingURL returns the URL used for retrieving the status of the long-running operation.
PollingURL() string
// GetResult should be called once polling has completed successfully.
// It makes the final GET call to retrieve the resultant payload.
GetResult(autorest.Sender) (*http.Response, error)
}
var _ FutureAPI = (*Future)(nil)
// Future provides a mechanism to access the status and results of an asynchronous request.
// Since futures are stateful they should be passed by value to avoid race conditions.
type Future struct {

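To illustrate how the FutureAPI surface is typically consumed, a minimal sketch (not part of the vendored code) that polls a long-running operation to completion; resp is assumed to be the service's initial 201/202 response:

package example

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func waitForOperation(ctx context.Context, client autorest.Client, resp *http.Response) (*http.Response, error) {
	future, err := azure.NewFutureFromResponse(resp)
	if err != nil {
		return nil, err
	}
	// honors Retry-After, client.PollingDuration and context cancellation
	if err := future.WaitForCompletionRef(ctx, client); err != nil {
		return nil, err
	}
	// final GET for the resultant payload
	return future.GetResult(client)
}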
View File

@@ -37,6 +37,9 @@ const (
	// should be included in the response.
	HeaderReturnClientID = "x-ms-return-client-request-id"
// HeaderContentType is the type of the content in the HTTP response.
HeaderContentType = "Content-Type"
	// HeaderRequestID is the Azure extension header of the service generated request ID returned
	// in the response.
	HeaderRequestID = "x-ms-request-id"
@@ -89,54 +92,85 @@ func (se ServiceError) Error() string {
// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
func (se *ServiceError) UnmarshalJSON(b []byte) error {
	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091

	type serviceErrorInternal struct {
		Code           string                   `json:"code"`
		Message        string                   `json:"message"`
		Target         *string                  `json:"target,omitempty"`
		AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"`
		// not all services conform to the OData v4 spec.
		// the following fields are where we've seen discrepancies

		// spec calls for []map[string]interface{} but have seen map[string]interface{}
		Details interface{} `json:"details,omitempty"`

		// spec calls for map[string]interface{} but have seen []map[string]interface{} and string
		InnerError interface{} `json:"innererror,omitempty"`
	}

	sei := serviceErrorInternal{}
	if err := json.Unmarshal(b, &sei); err != nil {
		return err
	}

	// copy the fields we know to be correct
	se.AdditionalInfo = sei.AdditionalInfo
	se.Code = sei.Code
	se.Message = sei.Message
	se.Target = sei.Target

	// converts an []interface{} to []map[string]interface{}
	arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) {
		arrayOf, ok := v.([]interface{})
		if !ok {
			return nil, false
		}
		final := []map[string]interface{}{}
		for _, item := range arrayOf {
			as, ok := item.(map[string]interface{})
			if !ok {
				return nil, false
			}
			final = append(final, as)
		}
		return final, true
	}

	// convert the remaining fields, falling back to raw JSON if necessary

	if c, ok := arrayOfObjs(sei.Details); ok {
		se.Details = c
	} else if c, ok := sei.Details.(map[string]interface{}); ok {
		se.Details = []map[string]interface{}{c}
	} else if sei.Details != nil {
		// stuff into Details
		se.Details = []map[string]interface{}{
			{"raw": sei.Details},
		}
	}

	if c, ok := sei.InnerError.(map[string]interface{}); ok {
		se.InnerError = c
	} else if c, ok := arrayOfObjs(sei.InnerError); ok {
		// if there's only one error extract it
		if len(c) == 1 {
			se.InnerError = c[0]
		} else {
			// multiple errors, stuff them into the value
			se.InnerError = map[string]interface{}{
				"multi": c,
			}
		}
	} else if c, ok := sei.InnerError.(string); ok {
		se.InnerError = map[string]interface{}{"error": c}
	} else if sei.InnerError != nil {
		// stuff into InnerError
		se.InnerError = map[string]interface{}{
			"raw": sei.InnerError,
		}
	}

	return nil
}
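As a quick illustration of why the decoder above tolerates multiple shapes, a small sketch (payloads invented for the example) that feeds both a spec-compliant and a non-compliant error body through azure.ServiceError:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	payloads := []string{
		// details as an array of objects, per the OData v4 spec
		`{"code":"Conflict","message":"busy","details":[{"code":"ScaleSet"}]}`,
		// details as a single object and innererror as a bare string
		`{"code":"Conflict","message":"busy","details":{"code":"ScaleSet"},"innererror":"timeout"}`,
	}
	for _, p := range payloads {
		var se azure.ServiceError
		if err := json.Unmarshal([]byte(p), &se); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %d detail object(s), inner=%v\n", se.Code, len(se.Details), se.InnerError)
	}
}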
// RequestError describes an error response returned by Azure service.
@@ -307,16 +341,30 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
	// Check if error is unwrapped ServiceError
	decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
	if err := decoder.Decode(&e.ServiceError); err != nil {
		return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err)
	}
	// for example, should the API return the literal value `null` as the response
	if e.ServiceError == nil {
		e.ServiceError = &ServiceError{
			Code:    "Unknown",
			Message: "Unknown service error",
			Details: []map[string]interface{}{
				{
					"HttpResponse.Body": b.String(),
				},
			},
		}
	}
}
if e.ServiceError != nil && e.ServiceError.Message == "" {
	// if we're here it means the returned error wasn't OData v4 compliant.
	// try to unmarshal the body in hopes of getting something.
	rawBody := map[string]interface{}{}
	decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
	if err := decoder.Decode(&rawBody); err != nil {
		return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err)
	}
	e.ServiceError = &ServiceError{

View File

@@ -17,6 +17,7 @@ package autorest
import (
	"bytes"
	"crypto/tls"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -165,7 +166,8 @@ type Client struct {
	// Setting this to zero will use the provided context to control the duration.
	PollingDuration time.Duration
	// RetryAttempts sets the total number of times the client will attempt to make an HTTP request.
	// Set the value to 1 to disable retries. DO NOT set the value to less than 1.
	RetryAttempts int
	// RetryDuration sets the delay duration for retries.
@@ -259,6 +261,9 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
	},
	})
	resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
if resp == nil && err == nil {
err = errors.New("autorest: received nil response and error")
}
	logger.Instance.WriteResponse(resp, logger.Filter{})
	Respond(resp, c.ByInspecting())
	return resp, err
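A minimal sketch (not part of the vendored code) of configuring the retry knobs documented above; the user agent string and durations are arbitrary choices for the example:

package example

import (
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func doWithRetries(req *http.Request) (*http.Response, error) {
	client := autorest.NewClientWithUserAgent("descheduler-docs-example")
	client.RetryAttempts = 3                 // total attempts; 1 disables retries, never go below 1
	client.RetryDuration = 2 * time.Second   // delay between attempts
	client.PollingDuration = 5 * time.Minute // 0 defers entirely to the request context
	return client.Do(req)                    // Do applies the retry settings and never returns nil, nil
}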

View File

@@ -1,5 +0,0 @@
module github.com/Azure/go-autorest/autorest/date
go 1.12
require github.com/Azure/go-autorest v14.2.0+incompatible

View File

@@ -1,2 +0,0 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=

Some files were not shown because too many files have changed in this diff.