mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)

Compare commits: deschedule ... release-1. (152 commits)
Commit SHA1s:

77d8b3d49a, cddc15bd3a, b59995eeb8, d998d82357, f51ea72ce0, fe8d4c0d21, 3843a2d5d1, 839a237d6a,
4cd1e66ef3, 2f18864fa5, 6e71068f4f, e40620effa, d7dc0abd7b, 012ca2398f, f07089d7b3, a54b59f208,
bfd5feaf60, 41d46d0d3b, 646c13ae15, 3b9d3d9719, 449383caa3, 31fd097c0a, 11143d5b2c, 8480e03e9c,
0397425010, 5396282e3d, a9ff644de6, fe8e17f72c, a1709e9edd, 24c0ca2ef9, 9b26abd538, fc83c13166,
9b69962053, 4edbecc85d, 54f67266bb, 2a3529c543, 58408d710b, 161f66a12f, 6bde95c9a1, 724ff8a188,
feae158a50, 780ac7a51e, c4afb6bb30, 8b5c4e805d, f4e24a408f, 2781106d49, 534a30a058, bb55741320,
079bd6157b, 92cb1b23ed, 832facc526, c4fa6c472f, a848dac3cf, 43a2ccf9c4, 60cf3aeb95, 84b174e841,
40337d064d, 9fe585c854, 4fce2ca2f1, 4c11de0403, a9099efc45, 6edb644f2e, c239e1199f, b713b7852a,
5d07d0c8e2, 7076ba0760, 81b816d4a4, 9ebc909c7f, af01b675b0, ce6ce5a058, bd4f6d4fcd, 6a4181158a,
a2746d09e8, b5d7219391, b09d5d99dc, dbcc20f37f, 51340b56b8, 160669817e, 6ca4479892, 92740a25d4,
56e4daccaf, b546832b66, 39e5b34af3, e699e08d13, af26b57e5e, 22fe589ae6, 0a11b5a138, 363f02710b,
6abfa232e7, bbfb12a120, 5df2a0c516, 8ecd14289a, 131ed42a4c, 6b8d4cd5a7, 24a06511a2, 09c7d1be0a,
f5666882de, 701f22404b, d5fa60bdd5, 9e14f733b7, 29ade13ce7, 03518badb8, 24458fb0ca, 1c5b32763b,
3bd031bbb3, ea6e9f22b9, 73309a3948, 105313a0e3, d368cbed32, 5f4dfbc922, 967911e4c1, 726091712d,
854afa7c73, 2517268b1f, fea8beabab, 7f9c95fa16, 6f7e2a271e, 217ebdfa73, e014fda58e, 6d0360fd16,
01a87b6143, f3e871492c, bf8d744686, 477d12968e, dcb4136a96, 2c0afafccf, e56617253f, 500aaea4dd,
4444811f26, b63c6fac27, dfc76906d4, fbd17d4caf, 9c01589fb9, a0942afaa1, 16fa21a4a6, 241f1325c9,
c1a63a557a, e45e21368a, f24b367479, 8ba9cb1df7, d502f05910, 241f47d947, 19424f4119, 635a40f305,
1804d2e3a2, fea4870243, f54df67d11, 1a998037f8, c481877c03, 67df39690b, 674f14da78, 0cfbdf642b
.gitignore (vendored, 1 change)

@@ -3,3 +3,4 @@ _tmp/
vendordiff.patch
.idea/
*.code-workspace
.vscode/
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.15.5
FROM golang:1.16.14

WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
@@ -21,7 +21,9 @@ RUN VERSION=${VERSION} make build.$ARCH

FROM scratch

MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>

USER 1000

COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler

@@ -13,7 +13,9 @@
# limitations under the License.
FROM scratch

MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>

USER 1000

COPY _output/bin/descheduler /bin/descheduler
Makefile (33 changes)

@@ -16,11 +16,13 @@

# VERSION is based on a date stamp plus the last commit
VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags --match "v*")
BRANCH?=$(shell git branch --show-current)
SHA1?=$(shell git rev-parse HEAD)
BUILD=$(shell date +%FT%T%z)
LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
ARCHS = amd64 arm64
LDFLAG_LOCATION=sigs.k8s.io/descheduler/pkg/version
ARCHS = amd64 arm arm64

LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD}"
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"

GOLANGCI_VERSION := v1.30.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint)
@@ -51,6 +53,9 @@ build:
build.amd64:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

build.arm:
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

build.arm64:
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

@@ -63,6 +68,9 @@ image:
image.amd64:
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .

image.arm:
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .

image.arm64:
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .

@@ -71,7 +79,7 @@ push: image
docker tag $(IMAGE) $(IMAGE_GCLOUD)
docker push $(IMAGE_GCLOUD)

push-all: image.amd64 image.arm64
push-all: image.amd64 image.arm image.arm64
gcloud auth configure-docker
for arch in $(ARCHS); do \
docker tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
@@ -85,8 +93,12 @@ push-all: image.amd64 image.arm64

clean:
rm -rf _output
rm -rf _tmp

verify: verify-gofmt verify-vendor lint lint-chart verify-spelling
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-toc verify-gen

verify-govet:
./hack/verify-govet.sh

verify-spelling:
./hack/verify-spelling.sh
@@ -97,6 +109,9 @@ verify-gofmt:
verify-vendor:
./hack/verify-vendor.sh

verify-toc:
./hack/verify-toc.sh

test-unit:
./test/run-unit-tests.sh

@@ -107,6 +122,11 @@ gen:
./hack/update-generated-conversions.sh
./hack/update-generated-deep-copies.sh
./hack/update-generated-defaulters.sh
./hack/update-toc.sh

verify-gen:
./hack/verify-conversions.sh
./hack/verify-deep-copies.sh

lint:
ifndef HAS_GOLANGCI
@@ -119,3 +139,6 @@ ifndef HAS_HELM
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
endif
helm lint ./charts/descheduler

test-helm:
./test/run-helm-tests.sh
OWNERS (10 changes)

@@ -1,8 +1,7 @@
approvers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- damemi
- ingvagabund
- seanmalloy
reviewers:
- aveshagarwal
- k82cn
@@ -10,3 +9,8 @@ reviewers:
- damemi
- seanmalloy
- ingvagabund
- lixiang233
emeritus_approvers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
README.md (322 changes)

@@ -24,36 +24,42 @@ but relies on the default scheduler for that.

Table of Contents
=================

* [Quick Start](#quick-start)
* [Run As A Job](#run-as-a-job)
* [Run As A CronJob](#run-as-a-cronjob)
* [Install Using Helm](#install-using-helm)
* [Install Using Kustomize](#install-using-kustomize)
* [User Guide](#user-guide)
* [Policy and Strategies](#policy-and-strategies)
* [RemoveDuplicates](#removeduplicates)
* [LowNodeUtilization](#lownodeutilization)
* [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
* [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
* [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
* [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
* [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
* [PodLifeTime](#podlifetime)
* [Filter Pods](#filter-pods)
* [Namespace filtering](#namespace-filtering)
* [Priority filtering](#priority-filtering)
* [Pod Evictions](#pod-evictions)
* [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
* [Compatibility Matrix](#compatibility-matrix)
* [Getting Involved and Contributing](#getting-involved-and-contributing)
* [Communicating With Contributors](#communicating-with-contributors)
* [Roadmap](#roadmap)
* [Code of conduct](#code-of-conduct)
<!-- toc -->
- [Quick Start](#quick-start)
- [Run As A Job](#run-as-a-job)
- [Run As A CronJob](#run-as-a-cronjob)
- [Run As A Deployment](#run-as-a-deployment)
- [Install Using Helm](#install-using-helm)
- [Install Using Kustomize](#install-using-kustomize)
- [User Guide](#user-guide)
- [Policy and Strategies](#policy-and-strategies)
- [RemoveDuplicates](#removeduplicates)
- [LowNodeUtilization](#lownodeutilization)
- [HighNodeUtilization](#highnodeutilization)
- [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
- [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
- [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
- [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
- [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
- [PodLifeTime](#podlifetime)
- [Filter Pods](#filter-pods)
- [Namespace filtering](#namespace-filtering)
- [Priority filtering](#priority-filtering)
- [Label filtering](#label-filtering)
- [Node Fit filtering](#node-fit-filtering)
- [Pod Evictions](#pod-evictions)
- [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
- [Metrics](#metrics)
- [Compatibility Matrix](#compatibility-matrix)
- [Getting Involved and Contributing](#getting-involved-and-contributing)
- [Communicating With Contributors](#communicating-with-contributors)
- [Roadmap](#roadmap)
- [Code of conduct](#code-of-conduct)
<!-- /toc -->

## Quick Start

The descheduler can be run as a Job or CronJob inside of a k8s cluster. It has the
The descheduler can be run as a `Job`, `CronJob`, or `Deployment` inside of a k8s cluster. It has the
advantage of being able to be run multiple times without needing user intervention.
The descheduler pod is run as a critical pod in the `kube-system` namespace to avoid
being evicted by itself or by the kubelet.
@@ -74,6 +80,14 @@ kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/cronjob/cronjob.yaml
```

### Run As A Deployment

```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/deployment/deployment.yaml
```

### Install Using Helm

Starting with release v0.18.0 there is an official helm chart that can be used to install the
@@ -84,16 +98,21 @@ The descheduler helm chart is also listed on the [artifact hub](https://artifact
### Install Using Kustomize

You can use kustomize to install descheduler.
See the [resources | Kustomize](https://kubernetes-sigs.github.io/kustomize/api-reference/kustomization/resources/) for detailed instructions.
See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/resource/) for detailed instructions.

Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=master' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.21.0' | kubectl apply -f -
```

Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=master' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.21.0' | kubectl apply -f -
```

Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.21.0' | kubectl apply -f -
```

## User Guide
@@ -103,14 +122,29 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.
## Policy and Strategies

Descheduler's policy is configurable and includes strategies that can be enabled or disabled.
Eight strategies `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`,
`RemovePodsViolatingNodeAffinity`, `RemovePodsViolatingNodeTaints`, `RemovePodsViolatingTopologySpreadConstraint`,
`RemovePodsHavingTooManyRestarts`, and `PodLifeTime` are currently implemented. As part of the policy, the
Nine strategies
1. `RemoveDuplicates`
2. `LowNodeUtilization`
3. `HighNodeUtilization`
4. `RemovePodsViolatingInterPodAntiAffinity`
5. `RemovePodsViolatingNodeAffinity`
6. `RemovePodsViolatingNodeTaints`
7. `RemovePodsViolatingTopologySpreadConstraint`
8. `RemovePodsHavingTooManyRestarts`
9. `PodLifeTime`
are currently implemented. As part of the policy, the
parameters associated with the strategies can be configured too. By default, all strategies are enabled.

The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.



The policy also includes common configuration for all the strategies:
- `nodeSelector` - limiting the nodes which are processed
- `evictLocalStoragePods` - allowing to evict pods with local storage
- `evictLocalStoragePods` - allows eviction of pods with local storage
- `evictSystemCriticalPods` - [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns
- `ignorePvcPods` - set whether PVC pods should be evicted or ignored (defaults to `false`)
- `maxNoOfPodsToEvictPerNode` - maximum number of pods evicted from each node (summed through all strategies)

```yaml
@@ -118,22 +152,26 @@ apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictLocalStoragePods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
strategies:
  ...
```

### RemoveDuplicates

This strategy makes sure that there is only one pod associated with a Replica Set (RS),
Replication Controller (RC), Deployment, or Job running on the same node. If there are more,
This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
ReplicationController (RC), StatefulSet, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This issue could happen
if some nodes went down due to whatever reasons, and pods on them were moved to other nodes leading to
more than one pod associated with a RS or RC, for example, running on the same node. Once the failed nodes
are ready again, this strategy could be enabled to evict those duplicate pods.

It provides one optional parameter, `ExcludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
It provides one optional parameter, `excludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction. Note that
pods created by Deployments are considered for eviction by this strategy. The `excludeOwnerKinds` parameter
should include `ReplicaSet` to have pods created by Deployments excluded.

**Parameters:**

@@ -143,6 +181,7 @@ has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considere
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**
```yaml
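# The example body falls outside the lines shown in this hunk. The following is a
# minimal sketch only, assuming the "removeDuplicates" params block and the
# excludeOwnerKinds field described in the prose above (values are illustrative):
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: true
    params:
      removeDuplicates:
        excludeOwnerKinds:
        - "ReplicaSet"
```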
@@ -164,17 +203,22 @@ in the hope that recreation of evicted pods will be scheduled on these underutil
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.

The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, and number of pods in terms of percentage. If a node's
usage is below threshold for all (cpu, memory, and number of pods), the node is considered underutilized.
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage (the percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node).

If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods request resource requirements are considered for computing node resource utilization.

There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, or number of pods),
from where pods could be evicted. If a node's usage is above targetThreshold for any (cpu, memory, number of pods, or extended resources),
the node is considered over utilized. Any node between the thresholds, `thresholds` and `targetThresholds` is
considered appropriately utilized and is not considered for eviction. The threshold, `targetThresholds`,
can be configured for cpu, memory, and number of pods too in terms of percentage.

These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
(those with usage below `thresholds`), it will abort if any number of `underutilized nodes` or `overutilized nodes` is zero.

**Parameters:**

@@ -185,6 +229,7 @@ These thresholds, `thresholds` and `targetThresholds`, could be tuned as per you
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

@@ -207,19 +252,76 @@ strategies:
```

Policy should pass the following validation checks:
* Only three types of resources are supported: `cpu`, `memory` and `pods`.
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`.
If any of these resource types is not specified, all its thresholds default to 100% to avoid nodes going from underutilized to overutilized.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional,
and will not be used to compute node's usage if it's not specified in `thresholds` and `targetThresholds` explicitly.
* `thresholds` or `targetThresholds` can not be nil and they must configure exactly the same types of resources.
* The valid range of the resource's percentage value is \[0, 100\]
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.

If any of the resource types is not specified, all its thresholds default to 100% to avoid nodes going
from underutilized to overutilized.

There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
are above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
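A minimal sketch of how `numberOfNodes` combines with `thresholds` and `targetThresholds`, assuming `numberOfNodes` sits under `nodeResourceUtilizationThresholds` as the parameter table above suggests (the values are illustrative, not recommendations):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
        # Only act when at least 3 nodes are below the thresholds (hypothetical value).
        numberOfNodes: 3
```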
### HighNodeUtilization

This strategy finds nodes that are under utilized and evicts pods in the hope that these pods will be scheduled compactly into fewer nodes.
This strategy **must** be used with the
scheduler strategy `MostRequestedPriority`. The parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.

The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.

If a node's usage is below threshold for all (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods request resource requirements are considered for computing node resource utilization.
Any node above `thresholds` is considered appropriately utilized and is not considered for eviction.

The `thresholds` param could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `underutilized nodes` (those with usage below `thresholds`)
so that they can be recreated in appropriately utilized nodes.
The strategy will abort if any number of `underutilized nodes` or `appropriately utilized nodes` is zero.

**Parameters:**

|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "HighNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
```

Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`. If any of these resource types is not specified, all its thresholds default to 100%.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional, and will not be used to compute node's usage if it's not specified in `thresholds` explicitly.
* `thresholds` can not be nil.
* The valid range of the resource's percentage value is \[0, 100\]

There is another parameter associated with the `HighNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.

### RemovePodsViolatingInterPodAntiAffinity

This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
@@ -235,6 +337,8 @@ node.
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**
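The example body is not part of this hunk; a minimal sketch of simply enabling the strategy, following the pattern used by the other examples in this README, would be:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: true
```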
@@ -272,6 +376,8 @@ podA gets evicted from nodeA.
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**
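The example body is outside this hunk as well. A minimal sketch, assuming the `nodeAffinityType` parameter that also appears in the chart's `values.yaml` later in this diff:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
```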
@@ -300,6 +406,8 @@ and will be evicted.
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**
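Again the example body is not shown in this hunk; a minimal sketch of enabling the strategy:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
```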
@@ -317,13 +425,21 @@ This strategy makes sure that pods violating [topology spread constraints](https
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.

By default, this strategy only deals with hard constraints, setting parameter `includeSoftConstraints` to `true` will
include soft constraints.

Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.

**Parameters:**

|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

@@ -333,15 +449,18 @@ kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingTopologySpreadConstraint":
    enabled: true
    params:
      includeSoftConstraints: false
```

### RemovePodsHavingTooManyRestarts

This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that
can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters
include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`,
This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that
can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters
include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`,
which determines whether init container restarts should be factored into that calculation.
|`labelSelector`|(see [label filtering](#label-filtering))|

**Parameters:**

@@ -352,6 +471,7 @@ which determines whether init container restarts should be factored into that ca
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**
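The example body is outside this hunk. A minimal sketch, assuming the strategy's parameters live under a `podsHavingTooManyRestarts` block (the threshold value is illustrative):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        podRestartThreshold: 100
        includingInitContainers: true
```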
@@ -383,6 +503,7 @@ to `Running` and `Pending`.
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|

**Example:**
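The example body is outside this hunk. A minimal sketch, assuming the `podLifeTime` params block used in the label-filtering example below and a phase-restricting field for the `Running`/`Pending` behavior mentioned in the hunk context:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
        # Hypothetical restriction to pods stuck in Pending.
        podStatusPhases:
        - "Pending"
```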
@@ -457,6 +578,9 @@ All strategies are able to configure a priority threshold, only pods under the t
specify this threshold by setting `thresholdPriorityClassName`(setting the threshold to the value of the given
priority class) or `thresholdPriority`(directly setting the threshold) parameters. By default, this threshold
is set to the value of `system-cluster-critical` priority class.

Note: Setting `evictSystemCriticalPods` to true disables priority filtering entirely.

E.g.

Setting `thresholdPriority`
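The concrete example is outside this hunk; a minimal sketch, assuming `thresholdPriority` is set under a strategy's `params` like the other filter options:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      thresholdPriority: 10000
```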
@@ -488,18 +612,99 @@ strategies:
Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`, if the given priority class
does not exist, descheduler won't create it and will throw an error.

### Label filtering

The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#labelselector-v1-meta)
to filter pods by their labels:

* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingTopologySpreadConstraint`

This allows running strategies among pods the descheduler is interested in.

For example:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      labelSelector:
        matchLabels:
          component: redis
        matchExpressions:
          - {key: tier, operator: In, values: [cache]}
          - {key: environment, operator: NotIn, values: [dev]}
```

### Node Fit filtering

The following strategies accept a `nodeFit` boolean parameter which can optimize descheduling:
* `RemoveDuplicates`
* `LowNodeUtilization`
* `HighNodeUtilization`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemovePodsHavingTooManyRestarts`

If set to `true` the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently the following criteria are considered when setting `nodeFit` to `true`:
- A `nodeSelector` on the pod
- Any `Tolerations` on the pod and any `Taints` on the other nodes
- `nodeAffinity` on the pod
- Whether any of the other nodes are marked as `unschedulable`

E.g.

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu" : 50
          "memory": 50
          "pods": 50
      nodeFit: true
```

Note that node fit filtering references the current pod spec, and not that of it's owner.
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
This is expected behavior as the descheduler is a "best-effort" mechanism.

Using Deployments instead of ReplicationControllers provides an automated rollout of pod spec changes, therefore ensuring that the descheduler has an up-to-date view of the cluster state.

## Pod Evictions

When the descheduler decides to evict pods from a node, it employs the following general mechanism:

* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted.
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Job are
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
* Pods (static or mirrored pods or stand alone pods) not part of an ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are
never evicted because these pods won't be recreated.
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set)
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are evicted. This
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
Users should know how and if the pod will be recreated.

@@ -510,6 +715,16 @@ Setting `--v=4` or greater on the Descheduler will log all reasons why any pod i
Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods
are evicted by using the eviction subresource to handle PDB.

## Metrics

| name | type | description |
|-------|-------|----------------|
| build_info | gauge | constant 1 |
| pods_evicted | CounterVec | total number of pods evicted |

The metrics are served through https://localhost:10258/metrics by default.
The address and port can be changed by setting `--binding-address` and `--secure-port` flags.

## Compatibility Matrix
The below compatibility matrix shows the k8s client package(client-go, apimachinery, etc) versions that descheduler
is compiled with. At this time descheduler does not have a hard dependency to a specific k8s release. However a
@@ -521,6 +736,7 @@ packages that it is compiled with.

Descheduler | Supported Kubernetes Version
-------------|-----------------------------
v0.21 | v1.21
v0.20 | v1.20
v0.19 | v1.19
v0.18 | v1.18
@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.20.0
appVersion: 0.20.0
version: 0.21.0
appVersion: 0.21.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes

@@ -50,6 +50,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
@@ -59,7 +60,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
| `resources.cpuRequest` | Descheduler container CPU request | `500m` |
| `resources.memoryRequest` | Descheduler container memory request | `256Mi` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `nodeSelector` | Node selectors to run the descheduler cronjob on specific nodes | `nil` |
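For illustration, a hypothetical Helm values override built from the parameters listed above might pin the CronJob API group for clusters older than 1.21 and adjust the schedule and resources (a sketch, not a tested configuration):

```yaml
# my-values.yaml (hypothetical override file)
cronJobApiVersion: "batch/v1beta1"  # needed on Kubernetes < 1.21.0
schedule: "*/10 * * * *"
resources:
  requests:
    cpu: 500m
    memory: 256Mi
```

Such a file would be passed to `helm install` or `helm upgrade` with `-f my-values.yaml`.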
@@ -1,4 +1,4 @@
apiVersion: batch/v1beta1
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
@@ -33,6 +33,10 @@ spec:
{{- .Values.podLabels | toYaml | nindent 12 }}
{{- end }}
spec:
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
@@ -55,6 +59,14 @@ spec:
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume

charts/descheduler/templates/tests/test-descheduler-pod.yaml (new file, 29 lines)

@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: descheduler-test-pod
  annotations:
    "helm.sh/hook": test
spec:
  restartPolicy: Never
  serviceAccountName: descheduler-ci
  containers:
  - name: descheduler-test-container
    image: alpine:latest
    imagePullPolicy: IfNotPresent
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - All
      privileged: false
      runAsNonRoot: false
    command: ["/bin/ash"]
    args:
    - -c
    - >-
      apk --no-cache add curl &&
      curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl &&
      chmod +x ./kubectl &&
      mv ./kubectl /usr/local/bin/kubectl &&
      /usr/local/bin/kubectl get pods --namespace kube-system --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | grep "descheduler" | grep "Completed"
@@ -19,6 +19,7 @@ resources:
nameOverride: ""
fullnameOverride: ""

cronJobApiVersion: "batch/v1" # Use "batch/v1beta1" for k8s version < 1.21.0. TODO(@7i) remove with 1.23 release
schedule: "*/2 * * * *"
#startingDeadlineSeconds: 200
#successfulJobsHistoryLimit: 1
@@ -39,8 +40,8 @@ deschedulerPolicy:
RemovePodsViolatingNodeAffinity:
enabled: true
params:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
RemovePodsViolatingInterPodAntiAffinity:
enabled: true
LowNodeUtilization:
@@ -58,6 +59,9 @@ deschedulerPolicy:

priorityClassName: system-cluster-critical

nodeSelector: {}
# foo: bar

rbac:
# Specifies whether RBAC resources should be created
create: true
@@ -19,20 +19,29 @@ package options

import (
"github.com/spf13/pflag"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"

utilerrors "k8s.io/apimachinery/pkg/util/errors"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/logs"

"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
)

const (
DefaultDeschedulerPort = 10258
)

// DeschedulerServer configuration
type DeschedulerServer struct {
componentconfig.DeschedulerConfiguration
Client clientset.Interface
Logs *logs.Options

Client clientset.Interface
Logs *logs.Options
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
DisableMetrics bool
}

// NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -41,9 +50,14 @@ func NewDeschedulerServer() (*DeschedulerServer, error) {
if err != nil {
return nil, err
}

secureServing := apiserveroptions.NewSecureServingOptions().WithLoopback()
secureServing.BindPort = DefaultDeschedulerPort

return &DeschedulerServer{
DeschedulerConfiguration: *cfg,
Logs: logs.NewOptions(),
SecureServing: secureServing,
}, nil
}

@@ -66,7 +80,7 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err

// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.Logging.Format, "logging-format", rs.Logging.Format, `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
@@ -77,4 +91,7 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler")
// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "DEPRECATED: enables evicting pods using local storage by descheduler")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")

rs.SecureServing.AddFlags(fs)
}

@@ -18,6 +18,7 @@ limitations under the License.
package app

import (
"context"
"flag"
"io"

@@ -26,7 +27,11 @@ import (

"github.com/spf13/cobra"

apiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
restclient "k8s.io/client-go/rest"
aflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
)

@@ -46,9 +51,30 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
s.Logs.LogFormat = s.Logging.Format
s.Logs.Apply()

// LoopbackClientConfig is a config for a privileged loopback connection
var LoopbackClientConfig *restclient.Config
var SecureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return
}

if err := s.Validate(); err != nil {
klog.ErrorS(err, "failed to validate server configuration")
return
}

if !s.DisableMetrics {
ctx := context.TODO()
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())

if _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done()); err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return
}
}

err := Run(s)
if err != nil {
klog.ErrorS(err, "descheduler server")
@@ -18,77 +18,19 @@ package app

import (
"fmt"
"regexp"
"runtime"
"strings"

"github.com/spf13/cobra"
"sigs.k8s.io/descheduler/pkg/version"
)

var (
// version is a constant representing the version tag that
// generated this build. It should be set during build via -ldflags.
version string
// buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
//It should be set during build via -ldflags.
buildDate string
)

// Info holds the information related to descheduler app version.
type Info struct {
Major string `json:"major"`
Minor string `json:"minor"`
GitVersion string `json:"gitVersion"`
BuildDate string `json:"buildDate"`
GoVersion string `json:"goVersion"`
Compiler string `json:"compiler"`
Platform string `json:"platform"`
}

// Get returns the overall codebase version. It's for detecting
// what code a binary was built from.
func Get() Info {
majorVersion, minorVersion := splitVersion(version)
return Info{
Major: majorVersion,
Minor: minorVersion,
GitVersion: version,
BuildDate: buildDate,
GoVersion: runtime.Version(),
Compiler: runtime.Compiler,
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
}
}

func NewVersionCommand() *cobra.Command {
var versionCmd = &cobra.Command{
Use: "version",
Short: "Version of descheduler",
Long: `Prints the version of descheduler.`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Descheduler version %+v\n", Get())
fmt.Printf("Descheduler version %+v\n", version.Get())
},
}
return versionCmd
}

// splitVersion splits the git version to generate major and minor versions needed.
func splitVersion(version string) (string, string) {
if version == "" {
return "", ""
}

// Version from an automated container build environment for a tag. For example v20200521-v0.18.0.
m1, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+$`, version)

// Version from an automated container build environment(not a tag) or a local build. For example v20201009-v0.18.0-46-g939c1c0.
m2, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+-\w+-\w+$`, version)

if m1 || m2 {
semVer := strings.Split(version, "-")[1]
return strings.Trim(strings.Split(semVer, ".")[0], "v"), strings.Split(semVer, ".")[1] + "+"
}

// Something went wrong
return "", ""
}
@@ -3,10 +3,10 @@
## Required Tools

- [Git](https://git-scm.com/downloads)
- [Go 1.15+](https://golang.org/dl/)
- [Go 1.16+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
- [kind v0.9.0+](https://kind.sigs.k8s.io/)
- [kind v0.10.0+](https://kind.sigs.k8s.io/)

## Build and Run

@@ -39,5 +39,18 @@ make test-unit
make test-e2e
```

## Run Helm Tests
Run the helm test for a particular descheduler release by setting below variables,
```
HELM_IMAGE_REPO="descheduler"
HELM_IMAGE_TAG="helm-test"
HELM_CHART_LOCATION="./charts/descheduler"
```
The helm tests runs as part of descheduler CI. But, to run it manually from the descheduler root,

```
make test-helm
```

### Miscellaneous
See the [hack directory](https://github.com/kubernetes-sigs/descheduler/tree/master/hack) for additional tools and scripts used for developing the descheduler.
@@ -1,11 +1,17 @@
|
||||
# User Guide
|
||||
|
||||
Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.
|
||||
* `k8s.gcr.io/descheduler/descheduler`
|
||||
|
||||
Also, starting with descheduler release v0.20.0 multi-arch container images are provided. Currently AMD64 and ARM64
|
||||
container images are provided. Multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from
|
||||
a registry. Therefore starting with descheduler release v0.20.0 use the below process to download the official descheduler
|
||||
Descheduler Version | Container Image | Architectures |
|
||||
------------------- |-----------------------------------------------------|-------------------------|
|
||||
v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
|
||||
v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |
|
||||
v0.18.0 | k8s.gcr.io/descheduler/descheduler:v0.18.0 | AMD64 |
|
||||
v0.10.0 | k8s.gcr.io/descheduler/descheduler:v0.10.0 | AMD64 |
|
||||
|
||||
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
|
||||
starting with descheduler release v0.20.0 use the below process to download the official descheduler
|
||||
image into a kind cluster.
|
||||
```
|
||||
kind create cluster
|
||||
@@ -86,12 +92,12 @@ strategies:
|
||||
```
|
||||
|
||||
### Balance Cluster By Node Memory Utilization
|
||||
|
||||
If your cluster has been running for a long period of time, you may find that the resource utilization is not very
|
||||
balanced. The `LowNodeUtilization` strategy can be used to rebalance your cluster based on `cpu`, `memory`
|
||||
balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`
|
||||
or `number of pods`.
|
||||
|
||||
Using the following policy configuration file, descheduler will rebalance the cluster based on memory by evicting pods
|
||||
#### Balance high utilization nodes
|
||||
Using `LowNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
|
||||
from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
|
||||
|
||||
```
|
||||
@@ -108,6 +114,23 @@ strategies:
|
||||
"memory": 70
|
||||
```
|
||||
|
||||
#### Balance low utilization nodes
|
||||
Using `HighNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
|
||||
from nodes with memory utilization lower than 20%. This should be used along with scheduler strategy `MostRequestedPriority`.
|
||||
The evicted pods will be compacted into minimal set of nodes.
|
||||
|
||||
```
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"HighNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"memory": 20
|
||||
```
|
||||
|
||||
### Autoheal Node Problems
|
||||
Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
|
||||
[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
|
||||
|
||||
10
examples/high-node-utilization.yml
Normal file
10
examples/high-node-utilization.yml
Normal file
@@ -0,0 +1,10 @@
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "HighNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "memory": 20
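As a usage sketch (assuming a `descheduler` binary built from this repository is on `PATH` and the kubeconfig has permission to list nodes and evict pods), the example policy can be run directly:

```
descheduler --kubeconfig "$HOME/.kube/config" \
  --policy-config-file examples/high-node-utilization.yml \
  --v 3
```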
22 go.mod
@@ -1,17 +1,19 @@
module sigs.k8s.io/descheduler

go 1.15
go 1.16

require (
    github.com/client9/misspell v0.3.4
    github.com/spf13/cobra v0.0.5
    github.com/spf13/cobra v1.1.1
    github.com/spf13/pflag v1.0.5
    k8s.io/api v0.20.0
    k8s.io/apimachinery v0.20.0
    k8s.io/apiserver v0.20.0
    k8s.io/client-go v0.20.0
    k8s.io/code-generator v0.20.0
    k8s.io/component-base v0.20.0
    k8s.io/component-helpers v0.20.0
    k8s.io/klog/v2 v2.4.0
    k8s.io/api v0.21.0
    k8s.io/apimachinery v0.21.0
    k8s.io/apiserver v0.21.0
    k8s.io/client-go v0.21.0
    k8s.io/code-generator v0.21.0
    k8s.io/component-base v0.21.0
    k8s.io/component-helpers v0.21.0
    k8s.io/klog/v2 v2.8.0
    k8s.io/kubectl v0.20.5
    sigs.k8s.io/mdtoc v1.0.1
)
281 go.sum
@@ -1,6 +1,5 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
@@ -16,6 +15,7 @@ cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNF
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
@@ -26,8 +26,9 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=
|
||||
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
|
||||
github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE=
|
||||
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
|
||||
@@ -40,9 +41,14 @@ github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8
|
||||
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||
@@ -51,40 +57,59 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
@@ -94,13 +119,14 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
@@ -110,32 +136,33 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
|
||||
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
|
||||
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
|
||||
github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw=
|
||||
github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
|
||||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
@@ -146,7 +173,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
@@ -156,17 +182,20 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
|
||||
github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
|
||||
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
|
||||
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 h1:LP/6EfrZ/LyCc+SXvANDrIJ4sP9u2NAtqyv6QknetNQ=
|
||||
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
@@ -182,7 +211,6 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -190,16 +218,40 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
@@ -207,6 +259,7 @@ github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
@@ -214,34 +267,51 @@ github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
|
||||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
|
||||
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -249,9 +319,13 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
@@ -260,6 +334,8 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@@ -268,58 +344,83 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
|
||||
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
@@ -332,17 +433,18 @@ go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -372,12 +474,15 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ=
|
||||
golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -397,15 +502,14 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
|
||||
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -415,12 +519,15 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -434,6 +541,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -445,26 +553,31 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -473,6 +586,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
@@ -484,6 +598,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
@@ -500,11 +615,13 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8=
|
||||
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -520,7 +637,6 @@ google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
@@ -544,6 +660,7 @@ google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfG
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8=
|
||||
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
@@ -551,6 +668,7 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
@@ -560,22 +678,23 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
@@ -586,51 +705,69 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI=
|
||||
k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg=
|
||||
k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8=
|
||||
k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apiserver v0.20.0 h1:0MwO4xCoqZwhoLbFyyBSJdu55CScp4V4sAgX6z4oPBY=
|
||||
k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8=
|
||||
k8s.io/client-go v0.20.0 h1:Xlax8PKbZsjX4gFvNtt4F5MoJ1V5prDvCuoq9B7iax0=
|
||||
k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY=
|
||||
k8s.io/code-generator v0.20.0 h1:c8JaABvEEZPDE8MICTOtveHX2axchl+EptM+o4OGvbg=
|
||||
k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
|
||||
k8s.io/component-base v0.20.0 h1:BXGL8iitIQD+0NgW49UsM7MraNUUGDU3FBmrfUAtmVQ=
|
||||
k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA=
|
||||
k8s.io/component-helpers v0.20.0 h1:7Zi1fcb5nV0h03d9eeZGk71+ZWYvAN4Be+xMOZyFerc=
|
||||
k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk=
|
||||
k8s.io/api v0.20.5/go.mod h1:FQjAceXnVaWDeov2YUWhOb6Yt+5UjErkp6UO3nczO1Y=
|
||||
k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y=
|
||||
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
|
||||
k8s.io/apimachinery v0.20.5/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA=
|
||||
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
|
||||
k8s.io/apiserver v0.21.0 h1:1hWMfsz+cXxB77k6/y0XxWxwl6l9OF26PC9QneUVn1Q=
|
||||
k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg=
|
||||
k8s.io/cli-runtime v0.20.5/go.mod h1:ihjPeQWDk7NGVIkNEvpwxA3gJvqtU+LtkDj11TvyXn4=
|
||||
k8s.io/client-go v0.20.5/go.mod h1:Ee5OOMMYvlH8FCZhDsacjMlCBwetbGZETwo1OA+e6Zw=
|
||||
k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag=
|
||||
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
|
||||
k8s.io/code-generator v0.20.5/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
|
||||
k8s.io/code-generator v0.21.0 h1:LGWJOvkbBNpuRBqBRXUjzfvymUh7F/iR2KDpwLnqCM4=
|
||||
k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q=
|
||||
k8s.io/component-base v0.20.5/go.mod h1:l0isoBLGyQKwRoTWbPHR6jNDd3/VqQD43cNlsjddGng=
|
||||
k8s.io/component-base v0.21.0 h1:tLLGp4BBjQaCpS/KiuWh7m2xqvAdsxLm4ATxHSe5Zpg=
|
||||
k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw=
|
||||
k8s.io/component-helpers v0.20.5/go.mod h1:AzTdoPj6YAN2SUfhBX/FUUU3ntfFuse03q/VMLovEsE=
|
||||
k8s.io/component-helpers v0.21.0 h1:SoWLsd63LI5uwofcHVSO4jtlmZEJRycfwNBKU4eAGPQ=
|
||||
k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=
|
||||
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw=
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
|
||||
k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
|
||||
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
|
||||
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
|
||||
k8s.io/kubectl v0.20.5 h1:/wndy8hw5TsL8G8KWPDJrtPKS8D34uSdWS0BMRmtzWs=
|
||||
k8s.io/kubectl v0.20.5/go.mod h1:mlNQgyV18D4XFt5BmfSkrxQNS+arT2pXDQxxnH5lMiw=
|
||||
k8s.io/metrics v0.20.5/go.mod h1:vsptOayjKWKWHvWR1vFQY++vxydzaEo/2+JC7kSDKPU=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
|
||||
sigs.k8s.io/mdtoc v1.0.1 h1:6ECKhQnbetwZBR6R2IeT2LH+1w+2Zsip0iXjikgaXIk=
|
||||
sigs.k8s.io/mdtoc v1.0.1/go.mod h1:COYBtOjsaCg7o7SC4eaLwEXPuVRSuiVuLLRrHd7kShw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
||||
@@ -22,4 +22,5 @@ package tools
|
||||
import (
|
||||
_ "github.com/client9/misspell/cmd/misspell"
|
||||
_ "k8s.io/code-generator"
|
||||
_ "sigs.k8s.io/mdtoc"
|
||||
)
|
||||
|
||||
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.13|go1.14|go1.15') ]]; then
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
25  hack/update-toc.sh  Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/mdtoc --inplace README.md
|
||||
36  hack/verify-conversions.sh  Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
|
||||
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
|
||||
|
||||
_deschedulertmp="${_tmpdir}"
|
||||
mkdir -p "${_deschedulertmp}"
|
||||
|
||||
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
|
||||
_deschedulertmp="${_deschedulertmp}/descheduler"
|
||||
|
||||
pushd "${_deschedulertmp}" > /dev/null 2>&1
|
||||
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/conversion-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "./pkg/apis/componentconfig/v1alpha1,./pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.conversion
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
|
||||
echo "Generated output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh (and commit the result)"
|
||||
exit 1
|
||||
fi
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
echo "Generated conversions verified."
|
||||
36  hack/verify-deep-copies.sh  Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
|
||||
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
|
||||
|
||||
_deschedulertmp="${_tmpdir}"
|
||||
mkdir -p "${_deschedulertmp}"
|
||||
|
||||
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
|
||||
_deschedulertmp="${_deschedulertmp}/descheduler"
|
||||
|
||||
pushd "${_deschedulertmp}" > /dev/null 2>&1
|
||||
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/deepcopy-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "./pkg/apis/componentconfig,./pkg/apis/componentconfig/v1alpha1,./pkg/api,./pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.deepcopy
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
|
||||
echo "Generated deep-copies output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh (and commit the result)"
|
||||
exit 1
|
||||
fi
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
echo "Generated deep-copies verified."
|
||||
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.13|go1.14|go1.15') ]]; then
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
19  hack/verify-govet.sh  Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go vet ${OS_ROOT}/...
|
||||
29  hack/verify-toc.sh  Executable file
@@ -0,0 +1,29 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
|
||||
|
||||
if ! ${OS_OUTPUT_BINPATH}/mdtoc --inplace --dryrun README.md
|
||||
then
|
||||
echo "ERROR: Changes detected to table of contents. Run ./hack/update-toc.sh" >&2
|
||||
exit 1
|
||||
fi
|
||||
@@ -33,7 +33,6 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: descheduler-cluster-role-binding
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
apiVersion: batch/v1beta1
|
||||
apiVersion: batch/v1 # for k8s version < 1.21.0, use batch/v1beta1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: descheduler-cronjob
|
||||
@@ -16,7 +16,7 @@ spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: k8s.gcr.io/descheduler/descheduler:v0.20.0
|
||||
image: k8s.gcr.io/descheduler/descheduler:v0.21.0
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
@@ -31,6 +31,14 @@ spec:
|
||||
requests:
|
||||
cpu: "500m"
|
||||
memory: "256Mi"
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
restartPolicy: "Never"
|
||||
serviceAccountName: descheduler-sa
|
||||
volumes:
|
||||
|
||||
54  kubernetes/deployment/deployment.yaml  Normal file
@@ -0,0 +1,54 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: descheduler
|
||||
namespace: kube-system
|
||||
labels:
|
||||
app: descheduler
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: descheduler
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: descheduler
|
||||
spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
serviceAccountName: descheduler-sa
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: k8s.gcr.io/descheduler/descheduler:v0.21.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- "/bin/descheduler"
|
||||
args:
|
||||
- "--policy-config-file"
|
||||
- "/policy-dir/policy.yaml"
|
||||
- "--descheduling-interval"
|
||||
- "5m"
|
||||
- "--v"
|
||||
- "3"
|
||||
ports:
|
||||
- containerPort: 10258
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 256Mi
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
volumes:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: descheduler-policy-configmap
|
||||
6  kubernetes/deployment/kustomization.yaml  Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- ../base
|
||||
- deployment.yaml
|
||||
@@ -14,7 +14,7 @@ spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: k8s.gcr.io/descheduler/descheduler:v0.20.0
|
||||
image: k8s.gcr.io/descheduler/descheduler:v0.21.0
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
@@ -29,6 +29,14 @@ spec:
|
||||
requests:
|
||||
cpu: "500m"
|
||||
memory: "256Mi"
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
restartPolicy: "Never"
|
||||
serviceAccountName: descheduler-sa
|
||||
volumes:
|
||||
|
||||
72  metrics/metrics.go  Normal file
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"k8s.io/component-base/metrics"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
"sigs.k8s.io/descheduler/pkg/version"
|
||||
)
|
||||
|
||||
const (
|
||||
// DeschedulerSubsystem - subsystem name used by descheduler
|
||||
DeschedulerSubsystem = "descheduler"
|
||||
)
|
||||
|
||||
var (
|
||||
PodsEvicted = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "pods_evicted",
|
||||
Help: "Number of evicted pods, by the result, by the strategy, by the namespace. 'failed' result means a pod could not be evicted",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
}, []string{"result", "strategy", "namespace"})
|
||||
|
||||
buildInfo = metrics.NewGauge(
|
||||
&metrics.GaugeOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "build_info",
|
||||
Help: "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch",
|
||||
ConstLabels: map[string]string{"GoVersion": version.Get().GoVersion, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
)
|
||||
|
||||
metricsList = []metrics.Registerable{
|
||||
PodsEvicted,
|
||||
buildInfo,
|
||||
}
|
||||
)
|
||||
|
||||
var registerMetrics sync.Once
|
||||
|
||||
// Register all metrics.
|
||||
func Register() {
|
||||
// Register the metrics.
|
||||
registerMetrics.Do(func() {
|
||||
RegisterMetrics(metricsList...)
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterMetrics registers a list of metrics.
|
||||
func RegisterMetrics(extraMetrics ...metrics.Registerable) {
|
||||
for _, metric := range extraMetrics {
|
||||
legacyregistry.MustRegister(metric)
|
||||
}
|
||||
}
|
||||
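As a rough usage sketch (not part of this change set): a strategy can record an eviction outcome through the new PodsEvicted counter as below. The label keys match the metric definition above; the strategy and namespace values are illustrative assumptions. Register() is guarded by sync.Once, so it is safe to call from any entry point.

package main

import "sigs.k8s.io/descheduler/metrics"

func main() {
	// Register all descheduler metrics with the legacy registry (guarded by sync.Once).
	metrics.Register()

	// Count one successful eviction; the label values here are placeholders.
	metrics.PodsEvicted.With(map[string]string{
		"result":    "success",
		"strategy":  "RemoveDuplicates",
		"namespace": "default",
	}).Inc()
}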
@@ -35,6 +35,12 @@ type DeschedulerPolicy struct {
|
||||
// EvictLocalStoragePods allows pods using local storage to be evicted.
|
||||
EvictLocalStoragePods *bool
|
||||
|
||||
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
|
||||
EvictSystemCriticalPods *bool
|
||||
|
||||
// IgnorePVCPods prevents pods with PVCs from being evicted.
|
||||
IgnorePVCPods *bool
|
||||
|
||||
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
|
||||
MaxNoOfPodsToEvictPerNode *int
|
||||
}
|
||||
@@ -69,9 +75,12 @@ type StrategyParameters struct {
|
||||
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts
|
||||
PodLifeTime *PodLifeTime
|
||||
RemoveDuplicates *RemoveDuplicates
|
||||
IncludeSoftConstraints bool
|
||||
Namespaces *Namespaces
|
||||
ThresholdPriority *int32
|
||||
ThresholdPriorityClassName string
|
||||
LabelSelector *metav1.LabelSelector
|
||||
NodeFit bool
|
||||
}
|
||||
|
||||
type Percentage float64
|
||||
|
||||
@@ -35,6 +35,12 @@ type DeschedulerPolicy struct {
|
||||
// EvictLocalStoragePods allows pods using local storage to be evicted.
|
||||
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
|
||||
|
||||
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
|
||||
EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
|
||||
// IgnorePVCPods prevents pods with PVCs from being evicted.
|
||||
IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
|
||||
|
||||
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
|
||||
MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
|
||||
}
|
||||
@@ -67,9 +73,12 @@ type StrategyParameters struct {
|
||||
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
|
||||
PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
|
||||
RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
|
||||
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
|
||||
Namespaces *Namespaces `json:"namespaces"`
|
||||
ThresholdPriority *int32 `json:"thresholdPriority"`
|
||||
ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
|
||||
NodeFit bool `json:"nodeFit"`
|
||||
}
|
||||
|
||||
type Percentage float64
|
||||
|
||||
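For orientation only (not part of the diff), a minimal Go sketch of populating the new v1alpha1 policy fields; the import path is assumed from the ./pkg/api/v1alpha1 directory referenced by the conversion-gen invocation above, and the values are arbitrary. In a policy file these correspond to the evictSystemCriticalPods, ignorePvcPods, and maxNoOfPodsToEvictPerNode keys per the json tags.

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
	evictSystemCriticalPods := false
	ignorePVCPods := true
	maxPodsPerNode := 10

	// Only the policy-level knobs introduced in this release are set here.
	policy := &v1alpha1.DeschedulerPolicy{
		EvictSystemCriticalPods:   &evictSystemCriticalPods,
		IgnorePVCPods:             &ignorePVCPods,
		MaxNoOfPodsToEvictPerNode: &maxPodsPerNode,
	}

	fmt.Printf("max pods to evict per node: %d\n", *policy.MaxNoOfPodsToEvictPerNode)
}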
@@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -23,6 +23,7 @@ package v1alpha1
|
||||
import (
|
||||
unsafe "unsafe"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
api "sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -122,6 +123,8 @@ func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
|
||||
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
|
||||
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
|
||||
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
|
||||
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
|
||||
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
|
||||
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
|
||||
return nil
|
||||
}
|
||||
@@ -135,6 +138,8 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.Des
|
||||
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
|
||||
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
|
||||
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
|
||||
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
|
||||
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
|
||||
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
|
||||
return nil
|
||||
}
|
||||
@@ -284,9 +289,12 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
|
||||
out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
|
||||
out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
|
||||
out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
|
||||
out.IncludeSoftConstraints = in.IncludeSoftConstraints
|
||||
out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
|
||||
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
|
||||
out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
|
||||
out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
|
||||
out.NodeFit = in.NodeFit
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -301,9 +309,12 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
|
||||
out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
|
||||
out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
|
||||
out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
|
||||
out.IncludeSoftConstraints = in.IncludeSoftConstraints
|
||||
out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
|
||||
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
|
||||
out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
|
||||
out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
|
||||
out.NodeFit = in.NodeFit
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -21,6 +21,7 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
@@ -45,6 +46,16 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictSystemCriticalPods != nil {
|
||||
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.IgnorePVCPods != nil {
|
||||
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNode != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
|
||||
*out = new(int)
|
||||
@@ -293,6 +304,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.LabelSelector != nil {
|
||||
in, out := &in.LabelSelector, &out.LabelSelector
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -21,6 +21,7 @@ limitations under the License.
|
||||
package api
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
@@ -45,6 +46,16 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictSystemCriticalPods != nil {
|
||||
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.IgnorePVCPods != nil {
|
||||
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNode != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
|
||||
*out = new(int)
|
||||
@@ -293,6 +304,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.LabelSelector != nil {
|
||||
in, out := &in.LabelSelector, &out.LabelSelector
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -50,6 +50,9 @@ type DeschedulerConfiguration struct {
|
||||
// EvictLocalStoragePods allows pods using local storage to be evicted.
|
||||
EvictLocalStoragePods bool
|
||||
|
||||
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
|
||||
IgnorePVCPods bool
|
||||
|
||||
// Logging specifies the options of logging.
|
||||
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
|
||||
Logging componentbaseconfig.LoggingConfiguration
|
||||
|
||||
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -48,4 +49,11 @@ type DeschedulerConfiguration struct {
|
||||
|
||||
// EvictLocalStoragePods allows pods using local storage to be evicted.
|
||||
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
|
||||
|
||||
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
|
||||
IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
|
||||
|
||||
// Logging specifies the options of logging.
|
||||
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
|
||||
Logging componentbaseconfig.LoggingConfiguration `json:"logging,omitempty"`
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -56,6 +56,8 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
|
||||
out.NodeSelector = in.NodeSelector
|
||||
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
|
||||
out.EvictLocalStoragePods = in.EvictLocalStoragePods
|
||||
out.IgnorePVCPods = in.IgnorePVCPods
|
||||
out.Logging = in.Logging
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -72,6 +74,8 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
|
||||
out.NodeSelector = in.NodeSelector
|
||||
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
|
||||
out.EvictLocalStoragePods = in.EvictLocalStoragePods
|
||||
out.IgnorePVCPods = in.IgnorePVCPods
|
||||
out.Logging = in.Logging
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.Logging = in.Logging
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -28,6 +28,7 @@ import (
|
||||
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.Logging = in.Logging
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ package descheduler
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
@@ -27,6 +28,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
@@ -36,6 +38,8 @@ import (
|
||||
)
|
||||
|
||||
func Run(rs *options.DeschedulerServer) error {
|
||||
metrics.Register()
|
||||
|
||||
ctx := context.Background()
|
||||
rsclient, err := client.CreateClient(rs.KubeconfigFile)
|
||||
if err != nil {
|
||||
@@ -69,9 +73,10 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
sharedInformerFactory.Start(stopChannel)
|
||||
sharedInformerFactory.WaitForCacheSync(stopChannel)
|
||||
|
||||
strategyFuncs := map[string]strategyFunction{
|
||||
strategyFuncs := map[api.StrategyName]strategyFunction{
|
||||
"RemoveDuplicates": strategies.RemoveDuplicatePods,
|
||||
"LowNodeUtilization": strategies.LowNodeUtilization,
|
||||
"LowNodeUtilization": nodeutilization.LowNodeUtilization,
|
||||
"HighNodeUtilization": nodeutilization.HighNodeUtilization,
|
||||
"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
|
||||
"RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity,
|
||||
"RemovePodsViolatingNodeTaints": strategies.RemovePodsViolatingNodeTaints,
|
||||
@@ -90,13 +95,26 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
|
||||
}
|
||||
|
||||
evictSystemCriticalPods := false
|
||||
if deschedulerPolicy.EvictSystemCriticalPods != nil {
|
||||
evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
|
||||
if evictSystemCriticalPods {
|
||||
klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
|
||||
}
|
||||
}
|
||||
|
||||
ignorePvcPods := false
|
||||
if deschedulerPolicy.IgnorePVCPods != nil {
|
||||
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
|
||||
}
|
||||
|
||||
maxNoOfPodsToEvictPerNode := rs.MaxNoOfPodsToEvictPerNode
|
||||
if deschedulerPolicy.MaxNoOfPodsToEvictPerNode != nil {
|
||||
maxNoOfPodsToEvictPerNode = *deschedulerPolicy.MaxNoOfPodsToEvictPerNode
|
||||
}
|
||||
|
||||
wait.Until(func() {
|
||||
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector, stopChannel)
|
||||
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
|
||||
if err != nil {
|
||||
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
|
||||
close(stopChannel)
|
||||
@@ -116,14 +134,22 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
maxNoOfPodsToEvictPerNode,
|
||||
nodes,
|
||||
evictLocalStoragePods,
|
||||
evictSystemCriticalPods,
|
||||
ignorePvcPods,
|
||||
)
|
||||
|
||||
for name, f := range strategyFuncs {
|
||||
if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
|
||||
f(ctx, rs.Client, strategy, nodes, podEvictor)
|
||||
for name, strategy := range deschedulerPolicy.Strategies {
|
||||
if f, ok := strategyFuncs[name]; ok {
|
||||
if strategy.Enabled {
|
||||
f(ctx, rs.Client, strategy, nodes, podEvictor)
|
||||
}
|
||||
} else {
|
||||
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
|
||||
|
||||
// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
|
||||
if rs.DeschedulingInterval.Seconds() == 0 {
|
||||
close(stopChannel)
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
@@ -44,15 +44,21 @@ func TestTaintsUpdated(t *testing.T) {
|
||||
}
|
||||
rs.Client = client
|
||||
rs.DeschedulingInterval = 100 * time.Millisecond
|
||||
errChan := make(chan error, 1)
|
||||
defer close(errChan)
|
||||
go func() {
|
||||
err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1", stopChannel)
|
||||
errChan <- err
|
||||
}()
|
||||
select {
|
||||
case err := <-errChan:
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to run descheduler strategies: %v", err)
|
||||
}
|
||||
}()
|
||||
case <-time.After(300 * time.Millisecond):
|
||||
// Wait for a few cycles and then verify the only pod still exists
|
||||
}
|
||||
|
||||
// Wait for few cycles and then verify the only pod still exists
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Unable to list pods: %v", err)
|
||||
|
||||
@@ -25,12 +25,15 @@ import (
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
|
||||
@@ -45,12 +48,15 @@ const (
|
||||
type nodePodEvictedCount map[*v1.Node]int
|
||||
|
||||
type PodEvictor struct {
|
||||
client clientset.Interface
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvictPerNode int
|
||||
nodepodCount nodePodEvictedCount
|
||||
evictLocalStoragePods bool
|
||||
client clientset.Interface
|
||||
nodes []*v1.Node
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvictPerNode int
|
||||
nodepodCount nodePodEvictedCount
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
ignorePvcPods bool
|
||||
}
|
||||
|
||||
func NewPodEvictor(
|
||||
@@ -60,6 +66,8 @@ func NewPodEvictor(
|
||||
maxPodsToEvictPerNode int,
|
||||
nodes []*v1.Node,
|
||||
evictLocalStoragePods bool,
|
||||
evictSystemCriticalPods bool,
|
||||
ignorePvcPods bool,
|
||||
) *PodEvictor {
|
||||
var nodePodCount = make(nodePodEvictedCount)
|
||||
for _, node := range nodes {
|
||||
@@ -68,12 +76,15 @@ func NewPodEvictor(
|
||||
}
|
||||
|
||||
return &PodEvictor{
|
||||
client: client,
|
||||
policyGroupVersion: policyGroupVersion,
|
||||
dryRun: dryRun,
|
||||
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
|
||||
nodepodCount: nodePodCount,
|
||||
evictLocalStoragePods: evictLocalStoragePods,
|
||||
client: client,
|
||||
nodes: nodes,
|
||||
policyGroupVersion: policyGroupVersion,
|
||||
dryRun: dryRun,
|
||||
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
|
||||
nodepodCount: nodePodCount,
|
||||
evictLocalStoragePods: evictLocalStoragePods,
|
||||
evictSystemCriticalPods: evictSystemCriticalPods,
|
||||
ignorePvcPods: ignorePvcPods,
|
||||
}
|
||||
}
|
||||
|
||||
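A hedged sketch of the extended constructor call; the leading parameters (client, eviction policy group version, dry-run flag) are assumed to keep their existing order, and the values below, including the fake clientset and the policy group version string, are illustrative only.

package main

import (
	v1 "k8s.io/api/core/v1"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

func main() {
	var nodes []*v1.Node // normally the ready nodes discovered by the descheduler

	podEvictor := evictions.NewPodEvictor(
		fakeclientset.NewSimpleClientset(),
		"policy/v1beta1", // illustrative eviction policy group version
		false,            // dryRun
		10,               // maxPodsToEvictPerNode
		nodes,
		false, // evictLocalStoragePods
		false, // evictSystemCriticalPods (new in this change)
		true,  // ignorePvcPods (new in this change)
	)
	_ = podEvictor
}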
@@ -94,12 +105,13 @@ func (pe *PodEvictor) TotalEvicted() int {
|
||||
// EvictPod returns non-nil error only when evicting a pod on a node is not
|
||||
// possible (due to maxPodsToEvictPerNode constraint). Success is true when the pod
|
||||
// is evicted on the server side.
|
||||
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, reasons ...string) (bool, error) {
|
||||
var reason string
|
||||
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node, strategy string, reasons ...string) (bool, error) {
|
||||
reason := strategy
|
||||
if len(reasons) > 0 {
|
||||
reason = " (" + strings.Join(reasons, ", ") + ")"
|
||||
reason += " (" + strings.Join(reasons, ", ") + ")"
|
||||
}
|
||||
if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "maximum number reached", "strategy": strategy, "namespace": pod.Namespace}).Inc()
|
||||
return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name)
|
||||
}
|
||||
|
||||
@@ -107,6 +119,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
|
||||
if err != nil {
|
||||
// err is used only for logging purposes
|
||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace}).Inc()
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -120,6 +133,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
|
||||
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
|
||||
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
|
||||
r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace}).Inc()
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -153,7 +167,9 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
|
||||
}
|
||||
|
||||
type Options struct {
|
||||
priority *int32
|
||||
priority *int32
|
||||
nodeFit bool
|
||||
labelSelector labels.Selector
|
||||
}
|
||||
|
||||
// WithPriorityThreshold sets a threshold for pod's priority class.
|
||||
@@ -165,6 +181,24 @@ func WithPriorityThreshold(priority int32) func(opts *Options) {
|
||||
}
|
||||
}
|
||||
|
||||
// WithNodeFit sets whether or not to consider taints, node selectors,
|
||||
// and pod affinity when evicting. A pod whose tolerations, node selectors,
|
||||
// and affinity match a node other than the one it is currently running on
|
||||
// is evictable.
|
||||
func WithNodeFit(nodeFit bool) func(opts *Options) {
|
||||
return func(opts *Options) {
|
||||
opts.nodeFit = nodeFit
|
||||
}
|
||||
}
|
||||
|
||||
// WithLabelSelector sets whether or not to apply label filtering when evicting.
|
||||
// Any pod matching the label selector is considered evictable.
|
||||
func WithLabelSelector(labelSelector labels.Selector) func(opts *Options) {
|
||||
return func(opts *Options) {
|
||||
opts.labelSelector = labelSelector
|
||||
}
|
||||
}
|
||||
|
||||
type constraint func(pod *v1.Pod) error
|
||||
|
||||
type evictable struct {
|
||||
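The new options compose with Evictable(), whose updated body follows in the next hunk. A minimal usage sketch, assuming a hypothetical app=example label selector:

package evictionexample

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

// isCandidate builds the eviction filter once, then tests a pod against it.
func isCandidate(pe *evictions.PodEvictor, pod *v1.Pod) bool {
	selector := labels.SelectorFromSet(labels.Set{"app": "example"}) // hypothetical selector

	return pe.Evictable(
		evictions.WithPriorityThreshold(1000), // pods with higher priority are not evictable
		evictions.WithNodeFit(true),           // require a fit on some node other than the current one
		evictions.WithLabelSelector(selector), // only pods matching the selector are evictable
	).IsEvictable(pod)
}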
@@ -181,34 +215,66 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
|
||||
}
|
||||
|
||||
ev := &evictable{}
|
||||
if !pe.evictSystemCriticalPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
// Moved from IsEvictable function to allow for disabling
|
||||
if utils.IsCriticalPriorityPod(pod) {
|
||||
return fmt.Errorf("pod has system critical priority")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if options.priority != nil {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if IsPodEvictableBasedOnPriority(pod, *options.priority) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("pod has higher priority than specified priority class threshold")
|
||||
})
|
||||
}
|
||||
}
|
||||
	if !pe.evictLocalStoragePods {
		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
			if utils.IsPodWithLocalStorage(pod) {
				return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
			}
			return nil
		})
	}
	if pe.ignorePvcPods {
		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
			if utils.IsPodWithPVC(pod) {
				return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
			}
			return nil
		})
	}
|
||||
if options.nodeFit {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if !nodeutil.PodFitsAnyOtherNode(pod, pe.nodes) {
|
||||
return fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if options.labelSelector != nil && !options.labelSelector.Empty() {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if !options.labelSelector.Matches(labels.Set(pod.Labels)) {
|
||||
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
return ev
|
||||
}
|
||||
|
||||
// IsEvictable decides when a pod is evictable
|
||||
func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
|
||||
checkErrs := []error{}
|
||||
if IsCriticalPod(pod) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is critical"))
|
||||
}
|
||||
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
	if utils.IsDaemonsetPod(ownerRefList) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
|
||||
}
|
||||
|
||||
@@ -216,10 +282,14 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod does not have any ownerrefs"))
|
||||
}
|
||||
|
||||
	if utils.IsMirrorPod(pod) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
|
||||
}
|
||||
|
||||
if utils.IsStaticPod(pod) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a static pod"))
|
||||
}
|
||||
|
||||
for _, c := range ev.constraints {
|
||||
if err := c(pod); err != nil {
|
||||
checkErrs = append(checkErrs, err)
|
||||
@@ -230,43 +300,16 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
|
||||
klog.V(4).InfoS("Pod lacks an eviction annotation and fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func IsCriticalPod(pod *v1.Pod) bool {
|
||||
return utils.IsCriticalPod(pod)
|
||||
}
|
||||
|
||||
func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
|
||||
for _, ownerRef := range ownerRefList {
|
||||
if ownerRef.Kind == "DaemonSet" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsMirrorPod checks whether the pod is a mirror pod.
|
||||
func IsMirrorPod(pod *v1.Pod) bool {
|
||||
return utils.IsMirrorPod(pod)
|
||||
}
|
||||
|
||||
// HaveEvictAnnotation checks if the pod have evict annotation
|
||||
func HaveEvictAnnotation(pod *v1.Pod) bool {
|
||||
_, found := pod.ObjectMeta.Annotations[evictPodAnnotationKey]
|
||||
return found
|
||||
}
|
||||
|
||||
func IsPodWithLocalStorage(pod *v1.Pod) bool {
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if volume.HostPath != nil || volume.EmptyDir != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsPodEvictableBasedOnPriority checks if the given pod is evictable based on priority resolved from pod Spec.
|
||||
func IsPodEvictableBasedOnPriority(pod *v1.Pod, priority int32) bool {
|
||||
return pod.Spec.Priority == nil || *pod.Spec.Priority < priority
|
||||
|
||||
@@ -73,48 +73,78 @@ func TestIsEvictable(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
||||
lowPriority := int32(800)
|
||||
highPriority := int32(900)
|
||||
|
||||
nodeTaintKey := "hardware"
|
||||
nodeTaintValue := "gpu"
|
||||
|
||||
nodeLabelKey := "datacenter"
|
||||
nodeLabelValue := "east"
|
||||
	type testCase struct {
		pod                     *v1.Pod
		nodes                   []*v1.Node
		runBefore               func(*v1.Pod, []*v1.Node)
		evictLocalStoragePods   bool
		evictSystemCriticalPods bool
		priorityThreshold       *int32
		nodeFit                 bool
		result                  bool
	}
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
{ // Normal pod eviction with normal ownerRefs
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, { // Normal pod eviction with normal ownerRefs and descheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, { // Normal pod eviction with replicaSet ownerRefs
|
||||
pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, { // Normal pod eviction with replicaSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, { // Normal pod eviction with statefulSet ownerRefs
|
||||
pod: test.BuildTestPod("p18", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, { // Normal pod eviction with statefulSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p19", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, { // Pod not evicted because it is bound to a PV and evictLocalStoragePods = false
|
||||
pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
@@ -127,11 +157,12 @@ func TestIsEvictable(t *testing.T) {
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, { // Pod is evicted because it is bound to a PV and evictLocalStoragePods = true
|
||||
pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
@@ -144,11 +175,12 @@ func TestIsEvictable(t *testing.T) {
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: true,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: true,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, { // Pod is evicted because it is bound to a PV and evictLocalStoragePods = false, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
@@ -162,52 +194,58 @@ func TestIsEvictable(t *testing.T) {
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
		}, { // Pod not evicted because it is part of a daemonSet
|
||||
pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
		}, { // Pod is evicted because it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
		}, { // Pod not evicted because it is a mirror pod
|
||||
pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
		}, { // Pod is evicted because it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
		}, { // Pod not evicted because it has system critical priority
|
||||
pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
		}, { // Pod is evicted because it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -215,41 +253,182 @@ func TestIsEvictable(t *testing.T) {
|
||||
"descheduler.alpha.kubernetes.io/evict": "true",
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
		}, { // Pod not evicted because it has a priority higher than the configured priority threshold
|
||||
pod: test.BuildTestPod("p14", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Priority = &highPriority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: false,
|
||||
}, {
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: false,
|
||||
		}, { // Pod is evicted because it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p15", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Spec.Priority = &highPriority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
		}, { // Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true
|
||||
pod: test.BuildTestPod("p16", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
		}, { // Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p16", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
		}, { // Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true
|
||||
pod: test.BuildTestPod("p17", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Priority = &highPriority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
		}, { // Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p17", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Spec.Priority = &highPriority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, { // Pod with no tolerations running on normal node, all other nodes tainted
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
for _, node := range nodes {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
}, { // Pod with correct tolerations running on normal node, all other nodes tainted
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Spec.Tolerations = []v1.Toleration{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
for _, node := range nodes {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, { // Pod with incorrect node selector
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: "fail",
|
||||
}
|
||||
}),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
for _, node := range nodes {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
}, { // Pod with correct node selector
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
for _, node := range nodes {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
	for _, test := range testCases {
		test.runBefore(test.pod, test.nodes)
		nodes := append(test.nodes, n1)

		podEvictor := &PodEvictor{
			evictLocalStoragePods:   test.evictLocalStoragePods,
			evictSystemCriticalPods: test.evictSystemCriticalPods,
			nodes:                   nodes,
		}

		evictable := podEvictor.Evictable()
		var opts []func(opts *Options)
		if test.priorityThreshold != nil {
			opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
		}
		if test.nodeFit {
			opts = append(opts, WithNodeFit(true))
		}
		evictable = podEvictor.Evictable(opts...)
|
||||
|
||||
result := evictable.IsEvictable(test.pod)
|
||||
if result != test.result {
|
||||
@@ -286,18 +465,18 @@ func TestPodTypes(t *testing.T) {
|
||||
}
|
||||
// A Mirror Pod.
|
||||
p4.Annotations = test.GetMirrorPodAnnotation()
|
||||
	if !utils.IsMirrorPod(p4) {
|
||||
t.Errorf("Expected p4 to be a mirror pod.")
|
||||
}
|
||||
	if !utils.IsPodWithLocalStorage(p3) {
|
||||
t.Errorf("Expected p3 to be a pod with local storage.")
|
||||
}
|
||||
ownerRefList := podutil.OwnerRef(p2)
|
||||
	if !utils.IsDaemonsetPod(ownerRefList) {
|
||||
t.Errorf("Expected p2 to be a daemonset pod.")
|
||||
}
|
||||
ownerRefList = podutil.OwnerRef(p1)
|
||||
	if utils.IsDaemonsetPod(ownerRefList) || utils.IsPodWithLocalStorage(p1) || utils.IsCriticalPriorityPod(p1) || utils.IsMirrorPod(p1) || utils.IsStaticPod(p1) {
|
||||
t.Errorf("Expected p1 to be a normal pod.")
|
||||
}
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
|
||||
// ReadyNodes returns ready nodes irrespective of whether they are
|
||||
// schedulable or not.
|
||||
func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string) ([]*v1.Node, error) {
|
||||
ns, err := labels.Parse(nodeSelector)
|
||||
if err != nil {
|
||||
return []*v1.Node{}, err
|
||||
@@ -96,7 +96,38 @@ func IsReady(node *v1.Node) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// PodFitsAnyOtherNode checks if the given pod fits any of the given nodes, besides the node
// the pod is already running on. The node fit is based on multiple criteria, like the pod's node
// selector matching the node's labels (including affinity), the taints on the node, and whether the node is schedulable.
func PodFitsAnyOtherNode(pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
|
||||
for _, node := range nodes {
|
||||
// Skip node pod is already on
|
||||
if node.Name == pod.Spec.NodeName {
|
||||
continue
|
||||
}
|
||||
// Check node selector and required affinity
|
||||
ok, err := utils.PodMatchNodeSelector(pod, node)
|
||||
if err != nil || !ok {
|
||||
continue
|
||||
}
|
||||
// Check taints (we only care about NoSchedule and NoExecute taints)
|
||||
ok = utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
// Check if node is schedulable
|
||||
if !IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Pod can possibly be scheduled on a different node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
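// Illustration (not part of the original diff): a minimal sketch of how a caller might use
// PodFitsAnyOtherNode; "pod" and "readyNodes" are hypothetical variables.
//
//	if !PodFitsAnyOtherNode(pod, readyNodes) {
//		// Evicting this pod would likely leave it pending or bring it back to the
//		// same node, so skip the eviction.
//		return
//	}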
|
||||
|
||||
// IsNodeUnschedulable checks if the node is unschedulable. This is a helper function used only in the case of
// underutilized nodes so that they won't be accounted for.
|
||||
func IsNodeUnschedulable(node *v1.Node) bool {
|
||||
return node.Spec.Unschedulable
|
||||
|
||||
@@ -74,7 +74,7 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
|
||||
sharedInformerFactory.WaitForCacheSync(stopChannel)
|
||||
defer close(stopChannel)
|
||||
|
||||
	nodes, _ := ReadyNodes(ctx, fakeClient, nodeInformer, nodeSelector)
|
||||
|
||||
if nodes[0].Name != "node1" {
|
||||
t.Errorf("Expected node1, got %s", nodes[0].Name)
|
||||
@@ -200,10 +200,15 @@ func TestPodFitsCurrentNode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodFitsAnyOtherNode(t *testing.T) {
|
||||
|
||||
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||
nodeLabelValue := "yes"
|
||||
nodeTaintKey := "hardware"
|
||||
nodeTaintValue := "gpu"
|
||||
|
||||
	// The staging node has no scheduling restrictions, but every test pod starts there, and PodFitsAnyOtherNode() does not consider the node a pod is already running on.
|
||||
nodeNames := []string{"node1", "node2", "stagingNode"}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
@@ -212,33 +217,12 @@ func TestPodFitsAnyNode(t *testing.T) {
|
||||
success bool
|
||||
}{
|
||||
		{
			description: "Pod fits another node matching node affinity",
			pod:         createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
@@ -246,42 +230,27 @@ func TestPodFitsAnyNode(t *testing.T) {
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
		{
			description: "Pod expected to fit one of the nodes",
			pod:         createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
@@ -289,42 +258,27 @@ func TestPodFitsAnyNode(t *testing.T) {
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Pod expected to fit none of the nodes",
|
||||
			pod:         createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "unfit1",
|
||||
},
|
||||
@@ -332,42 +286,27 @@ func TestPodFitsAnyNode(t *testing.T) {
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "unfit2",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Nodes are unschedulable but labels match, should fail",
|
||||
			pod:         createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
@@ -378,20 +317,136 @@ func TestPodFitsAnyNode(t *testing.T) {
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Two nodes matches node selector, one of them is tained, should pass",
|
||||
pod: createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Both nodes are tained, should fail",
|
||||
pod: createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
		actual := PodFitsAnyOtherNode(tc.pod, tc.nodes)
|
||||
if actual != tc.success {
|
||||
t.Errorf("Test %#v failed", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createPodManifest(nodeName string, nodeSelectorKey string, nodeSelectorValue string) *v1.Pod {
|
||||
return (&v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: nodeName,
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeSelectorKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeSelectorValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ type Options struct {
|
||||
filter func(pod *v1.Pod) bool
|
||||
includedNamespaces []string
|
||||
excludedNamespaces []string
|
||||
labelSelector *metav1.LabelSelector
|
||||
}
|
||||
|
||||
// WithFilter sets a pod filter.
|
||||
@@ -55,6 +56,13 @@ func WithoutNamespaces(namespaces []string) func(opts *Options) {
|
||||
}
|
||||
}
|
||||
|
||||
// WithLabelSelector sets a pod label selector
|
||||
func WithLabelSelector(labelSelector *metav1.LabelSelector) func(opts *Options) {
|
||||
return func(opts *Options) {
|
||||
opts.labelSelector = labelSelector
|
||||
}
|
||||
}
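// Illustration (not part of the original diff): a minimal sketch of listing only the pods
// on a node that match a label selector; the "app=cache" selector is a hypothetical example.
//
//	selector := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "cache"}}
//	pods, err := ListPodsOnANode(ctx, client, node, WithLabelSelector(selector))
//	if err != nil {
//		return err
//	}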
|
||||
|
||||
// ListPodsOnANode lists all of the pods on a node
|
||||
// It also accepts an optional "filter" function which can be used to further limit the pods that are returned.
|
||||
// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
|
||||
@@ -74,6 +82,15 @@ func ListPodsOnANode(
|
||||
|
||||
fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)
|
||||
|
||||
labelSelectorString := ""
|
||||
if options.labelSelector != nil {
|
||||
selector, err := metav1.LabelSelectorAsSelector(options.labelSelector)
|
||||
if err != nil {
|
||||
return []*v1.Pod{}, err
|
||||
}
|
||||
labelSelectorString = selector.String()
|
||||
}
|
||||
|
||||
if len(options.includedNamespaces) > 0 {
|
||||
fieldSelector, err := fields.ParseSelector(fieldSelectorString)
|
||||
if err != nil {
|
||||
@@ -82,7 +99,10 @@ func ListPodsOnANode(
|
||||
|
||||
for _, namespace := range options.includedNamespaces {
|
||||
podList, err := client.CoreV1().Pods(namespace).List(ctx,
|
||||
				metav1.ListOptions{
					FieldSelector: fieldSelector.String(),
					LabelSelector: labelSelectorString,
				})
|
||||
if err != nil {
|
||||
return []*v1.Pod{}, err
|
||||
}
|
||||
@@ -111,7 +131,10 @@ func ListPodsOnANode(
|
||||
// Once the descheduler switches to pod listers (through informers),
|
||||
// We need to flip to client-side filtering.
|
||||
podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
|
||||
		metav1.ListOptions{
			FieldSelector: fieldSelector.String(),
			LabelSelector: labelSelectorString,
		})
|
||||
if err != nil {
|
||||
return []*v1.Pod{}, err
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
@@ -40,6 +41,7 @@ func TestListPodsOnANode(t *testing.T) {
|
||||
name string
|
||||
pods map[string][]v1.Pod
|
||||
node *v1.Node
|
||||
labelSelector *metav1.LabelSelector
|
||||
expectedPodCount int
|
||||
}{
|
||||
{
|
||||
@@ -52,6 +54,33 @@ func TestListPodsOnANode(t *testing.T) {
|
||||
"n2": {*test.BuildTestPod("pod3", 100, 0, "n2", nil)},
|
||||
},
|
||||
node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||
labelSelector: nil,
|
||||
expectedPodCount: 2,
|
||||
},
|
||||
{
|
||||
name: "test listing pods with label selector",
|
||||
pods: map[string][]v1.Pod{
|
||||
"n1": {
|
||||
*test.BuildTestPod("pod1", 100, 0, "n1", nil),
|
||||
*test.BuildTestPod("pod2", 100, 0, "n1", func(pod *v1.Pod) {
|
||||
pod.Labels = map[string]string{"foo": "bar"}
|
||||
}),
|
||||
*test.BuildTestPod("pod3", 100, 0, "n1", func(pod *v1.Pod) {
|
||||
pod.Labels = map[string]string{"foo": "bar1"}
|
||||
}),
|
||||
},
|
||||
"n2": {*test.BuildTestPod("pod4", 100, 0, "n2", nil)},
|
||||
},
|
||||
node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
|
||||
labelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "foo",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"bar", "bar1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPodCount: 2,
|
||||
},
|
||||
}
|
||||
@@ -67,7 +96,7 @@ func TestListPodsOnANode(t *testing.T) {
|
||||
}
|
||||
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
})
|
||||
			pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node, WithLabelSelector(testCase.labelSelector))
|
||||
if len(pods) != testCase.expectedPodCount {
|
||||
t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
|
||||
}
|
||||
|
||||
@@ -83,7 +83,12 @@ func RemoveDuplicatePods(
|
||||
excludedNamespaces = strategy.Params.Namespaces.Exclude
|
||||
}
|
||||
|
||||
	nodeFit := false
|
||||
if strategy.Params != nil {
|
||||
nodeFit = strategy.Params.NodeFit
|
||||
}
|
||||
|
||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
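	// Illustration (not part of the original diff): with a strategy configured roughly as
	//   api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}}
	// (hypothetical inline construction), nodeFit resolves to true above, so the evictable
	// filter additionally requires that a duplicate pod fits on some other ready node
	// before it may be evicted.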
|
||||
|
||||
duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
|
||||
ownerKeyOccurence := make(map[podOwner]int32)
|
||||
@@ -183,9 +188,17 @@ func RemoveDuplicatePods(
|
||||
}
|
||||
|
||||
// 1. how many pods can be evicted to respect uniform placement of pods among viable nodes?
|
||||
	for ownerKey, podNodes := range duplicatePods {
|
||||
targetNodes := getTargetNodes(podNodes, nodes)
|
||||
|
||||
klog.V(2).InfoS("Adjusting feasible nodes", "owner", ownerKey, "from", nodeCount, "to", len(targetNodes))
|
||||
if len(targetNodes) < 2 {
|
||||
klog.V(1).InfoS("Less than two feasible nodes for duplicates to land, skipping eviction", "owner", ownerKey)
|
||||
continue
|
||||
}
|
||||
|
||||
upperAvg := int(math.Ceil(float64(ownerKeyOccurence[ownerKey]) / float64(len(targetNodes))))
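		// Worked example (illustrative numbers only): 9 pods of one owner spread as
		// (5, 3, 1) across 3 feasible target nodes gives upperAvg = ceil(9/3) = 3, so only
		// the node running 5 pods is above the average and 5 - 3 = 2 duplicates are evicted.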
|
||||
for nodeName, pods := range podNodes {
|
||||
klog.V(2).InfoS("Average occurrence per node", "node", klog.KObj(nodeMap[nodeName]), "ownerKey", ownerKey, "avg", upperAvg)
|
||||
// list of duplicated pods does not contain the original referential pod
|
||||
if len(pods)+1 > upperAvg {
|
||||
@@ -202,6 +215,73 @@ func RemoveDuplicatePods(
|
||||
}
|
||||
}
|
||||
|
||||
func getNodeAffinityNodeSelector(pod *v1.Pod) *v1.NodeSelector {
|
||||
if pod.Spec.Affinity == nil {
|
||||
return nil
|
||||
}
|
||||
if pod.Spec.Affinity.NodeAffinity == nil {
|
||||
return nil
|
||||
}
|
||||
return pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
}
|
||||
|
||||
func getTargetNodes(podNodes map[string][]*v1.Pod, nodes []*v1.Node) []*v1.Node {
|
||||
	// In order to reduce the number of pods processed, identify pods which have
	// equal (tolerations, nodeSelectors, node affinity) terms and consider them
	// identical. Pods that are identical wrt. these terms will produce the same result
	// when checking whether a pod is feasible for a node, which improves the efficiency
	// of processing the pods marked for eviction.
|
||||
|
||||
// Collect all distinct pods which differ in at least taints, node affinity or node selector terms
|
||||
distinctPods := map[*v1.Pod]struct{}{}
|
||||
for _, pods := range podNodes {
|
||||
for _, pod := range pods {
|
||||
duplicated := false
|
||||
for dp := range distinctPods {
|
||||
if utils.TolerationsEqual(pod.Spec.Tolerations, dp.Spec.Tolerations) &&
|
||||
utils.NodeSelectorsEqual(getNodeAffinityNodeSelector(pod), getNodeAffinityNodeSelector(dp)) &&
|
||||
reflect.DeepEqual(pod.Spec.NodeSelector, dp.Spec.NodeSelector) {
|
||||
duplicated = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
if duplicated {
|
||||
continue
|
||||
}
|
||||
distinctPods[pod] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// For each distinct pod get a list of nodes where it can land
|
||||
targetNodesMap := map[string]*v1.Node{}
|
||||
for pod := range distinctPods {
|
||||
matchingNodes := map[string]*v1.Node{}
|
||||
for _, node := range nodes {
|
||||
if !utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
}) {
|
||||
continue
|
||||
}
|
||||
if match, err := utils.PodMatchNodeSelector(pod, node); err == nil && !match {
|
||||
continue
|
||||
}
|
||||
matchingNodes[node.Name] = node
|
||||
}
|
||||
if len(matchingNodes) > 1 {
|
||||
for nodeName := range matchingNodes {
|
||||
targetNodesMap[nodeName] = matchingNodes[nodeName]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
targetNodes := []*v1.Node{}
|
||||
for _, node := range targetNodesMap {
|
||||
targetNodes = append(targetNodes, node)
|
||||
}
|
||||
|
||||
return targetNodes
|
||||
}
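// Illustration (not part of the original diff): as shown earlier in this diff,
// RemoveDuplicatePods uses the returned slice both as a feasibility gate and as the
// divisor for the per-node average:
//
//	targetNodes := getTargetNodes(podNodes, nodes)
//	if len(targetNodes) < 2 {
//		// fewer than two feasible nodes: nowhere to spread duplicates, skip this owner
//	}
//	upperAvg := int(math.Ceil(float64(ownerKeyOccurence[ownerKey]) / float64(len(targetNodes))))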
|
||||
|
||||
func hasExcludedOwnerRefKind(ownerRefs []metav1.OwnerReference, strategy api.DeschedulerStrategy) bool {
|
||||
if strategy.Params == nil || strategy.Params.RemoveDuplicates == nil {
|
||||
return false
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -46,6 +47,25 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
// first setup pods
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "hardware",
|
||||
Value: "gpu",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
})
|
||||
node4 := test.BuildTestNode("n4", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"datacenter": "east",
|
||||
}
|
||||
})
|
||||
node5 := test.BuildTestNode("n5", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec = v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p1.Namespace = "dev"
|
||||
@@ -72,6 +92,14 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
p13.Namespace = "different-images"
|
||||
p14 := test.BuildTestPod("p14", 100, 0, node1.Name, nil)
|
||||
p14.Namespace = "different-images"
|
||||
p15 := test.BuildTestPod("p15", 100, 0, node1.Name, nil)
|
||||
p15.Namespace = "node-fit"
|
||||
p16 := test.BuildTestPod("NOT1", 100, 0, node1.Name, nil)
|
||||
p16.Namespace = "node-fit"
|
||||
p17 := test.BuildTestPod("NOT2", 100, 0, node1.Name, nil)
|
||||
p17.Namespace = "node-fit"
|
||||
p18 := test.BuildTestPod("TARGET", 100, 0, node1.Name, nil)
|
||||
p18.Namespace = "node-fit"
|
||||
|
||||
// ### Evictable Pods ###
|
||||
|
||||
@@ -125,10 +153,29 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
Image: "foo",
|
||||
})
|
||||
|
||||
// ### Pods Evictable Based On Node Fit ###
|
||||
|
||||
ownerRef3 := test.GetReplicaSetOwnerRefList()
|
||||
p15.ObjectMeta.OwnerReferences = ownerRef3
|
||||
p16.ObjectMeta.OwnerReferences = ownerRef3
|
||||
p17.ObjectMeta.OwnerReferences = ownerRef3
|
||||
p18.ObjectMeta.OwnerReferences = ownerRef3
|
||||
|
||||
p15.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
p16.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
p17.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
description string
|
||||
maxPodsToEvictPerNode int
|
||||
pods []v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedEvictedPodCount int
|
||||
strategy api.DeschedulerStrategy
|
||||
}{
|
||||
@@ -136,6 +183,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 1,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
@@ -143,6 +191,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
|
||||
},
|
||||
@@ -150,6 +199,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p8, *p9, *p10},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 1,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
@@ -157,6 +207,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 2,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
@@ -164,6 +215,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
|
||||
maxPodsToEvictPerNode: 2,
|
||||
pods: []v1.Pod{*p4, *p5, *p6, *p7},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
@@ -171,6 +223,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
description: "Test all Pods: 4 should be evicted.",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 2,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
@@ -178,6 +231,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
description: "Pods with the same owner but different images should not be evicted",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p11, *p12},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
@@ -185,6 +239,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
description: "Pods with multiple containers should not match themselves",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p13},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
@@ -192,9 +247,34 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p11, *p13},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3},
|
||||
nodes: []*v1.Node{node1, node3},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
|
||||
},
|
||||
{
|
||||
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p15, *p16, *p17},
|
||||
nodes: []*v1.Node{node1, node4},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
|
||||
},
|
||||
{
|
||||
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2, *p3},
|
||||
nodes: []*v1.Node{node1, node5},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
@@ -208,11 +288,13 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
"v1",
|
||||
false,
|
||||
testCase.maxPodsToEvictPerNode,
|
||||
			testCase.nodes,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
		RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != testCase.expectedEvictedPodCount {
|
||||
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
|
||||
@@ -237,6 +319,117 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
setTolerationsK1 := func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
pod.Spec.Tolerations = []v1.Toleration{
|
||||
{
|
||||
Key: "k1",
|
||||
Value: "v1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}
|
||||
setTolerationsK2 := func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
pod.Spec.Tolerations = []v1.Toleration{
|
||||
{
|
||||
Key: "k2",
|
||||
Value: "v2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
setMasterNoScheduleTaint := func(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
Key: "node-role.kubernetes.io/master",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
setMasterNoScheduleLabel := func(node *v1.Node) {
|
||||
if node.ObjectMeta.Labels == nil {
|
||||
node.ObjectMeta.Labels = map[string]string{}
|
||||
}
|
||||
node.ObjectMeta.Labels["node-role.kubernetes.io/master"] = ""
|
||||
}
|
||||
|
||||
setWorkerLabel := func(node *v1.Node) {
|
||||
if node.ObjectMeta.Labels == nil {
|
||||
node.ObjectMeta.Labels = map[string]string{}
|
||||
}
|
||||
node.ObjectMeta.Labels["node-role.kubernetes.io/worker"] = "k1"
|
||||
node.ObjectMeta.Labels["node-role.kubernetes.io/worker"] = "k2"
|
||||
}
|
||||
|
||||
setNotMasterNodeSelectorK1 := func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
pod.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "node-role.kubernetes.io/master",
|
||||
Operator: v1.NodeSelectorOpDoesNotExist,
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpDoesNotExist,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
setNotMasterNodeSelectorK2 := func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
pod.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "node-role.kubernetes.io/master",
|
||||
Operator: v1.NodeSelectorOpDoesNotExist,
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpDoesNotExist,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
setWorkerLabelSelectorK1 := func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
if pod.Spec.NodeSelector == nil {
|
||||
pod.Spec.NodeSelector = map[string]string{}
|
||||
}
|
||||
pod.Spec.NodeSelector["node-role.kubernetes.io/worker"] = "k1"
|
||||
}
|
||||
|
||||
setWorkerLabelSelectorK2 := func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
if pod.Spec.NodeSelector == nil {
|
||||
pod.Spec.NodeSelector = map[string]string{}
|
||||
}
|
||||
pod.Spec.NodeSelector["node-role.kubernetes.io/worker"] = "k2"
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
description string
|
||||
maxPodsToEvictPerNode int
|
||||
@@ -390,6 +583,106 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
},
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Evict pods uniformly respecting taints",
|
||||
pods: []v1.Pod{
|
||||
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
|
||||
*test.BuildTestPod("p1", 100, 0, "worker1", setTolerationsK1),
|
||||
*test.BuildTestPod("p2", 100, 0, "worker1", setTolerationsK2),
|
||||
*test.BuildTestPod("p3", 100, 0, "worker1", setTolerationsK1),
|
||||
*test.BuildTestPod("p4", 100, 0, "worker1", setTolerationsK2),
|
||||
*test.BuildTestPod("p5", 100, 0, "worker1", setTolerationsK1),
|
||||
*test.BuildTestPod("p6", 100, 0, "worker2", setTolerationsK2),
|
||||
*test.BuildTestPod("p7", 100, 0, "worker2", setTolerationsK1),
|
||||
*test.BuildTestPod("p8", 100, 0, "worker2", setTolerationsK2),
|
||||
*test.BuildTestPod("p9", 100, 0, "worker3", setTolerationsK1),
|
||||
},
|
||||
expectedEvictedPodCount: 2,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("worker1", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("worker2", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("worker3", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("master1", 2000, 3000, 10, setMasterNoScheduleTaint),
|
||||
test.BuildTestNode("master2", 2000, 3000, 10, setMasterNoScheduleTaint),
|
||||
test.BuildTestNode("master3", 2000, 3000, 10, setMasterNoScheduleTaint),
|
||||
},
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Evict pods uniformly respecting RequiredDuringSchedulingIgnoredDuringExecution node affinity",
|
||||
pods: []v1.Pod{
|
||||
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
|
||||
*test.BuildTestPod("p1", 100, 0, "worker1", setNotMasterNodeSelectorK1),
|
||||
*test.BuildTestPod("p2", 100, 0, "worker1", setNotMasterNodeSelectorK2),
|
||||
*test.BuildTestPod("p3", 100, 0, "worker1", setNotMasterNodeSelectorK1),
|
||||
*test.BuildTestPod("p4", 100, 0, "worker1", setNotMasterNodeSelectorK2),
|
||||
*test.BuildTestPod("p5", 100, 0, "worker1", setNotMasterNodeSelectorK1),
|
||||
*test.BuildTestPod("p6", 100, 0, "worker2", setNotMasterNodeSelectorK2),
|
||||
*test.BuildTestPod("p7", 100, 0, "worker2", setNotMasterNodeSelectorK1),
|
||||
*test.BuildTestPod("p8", 100, 0, "worker2", setNotMasterNodeSelectorK2),
|
||||
*test.BuildTestPod("p9", 100, 0, "worker3", setNotMasterNodeSelectorK1),
|
||||
},
|
||||
expectedEvictedPodCount: 2,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("worker1", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("worker2", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("worker3", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("master1", 2000, 3000, 10, setMasterNoScheduleLabel),
|
||||
test.BuildTestNode("master2", 2000, 3000, 10, setMasterNoScheduleLabel),
|
||||
test.BuildTestNode("master3", 2000, 3000, 10, setMasterNoScheduleLabel),
|
||||
},
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Evict pods uniformly respecting node selector",
|
||||
pods: []v1.Pod{
|
||||
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
|
||||
*test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
|
||||
*test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
|
||||
*test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
|
||||
*test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
|
||||
*test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
|
||||
*test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
|
||||
*test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
|
||||
*test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
|
||||
*test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
|
||||
},
|
||||
expectedEvictedPodCount: 2,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("worker1", 2000, 3000, 10, setWorkerLabel),
|
||||
test.BuildTestNode("worker2", 2000, 3000, 10, setWorkerLabel),
|
||||
test.BuildTestNode("worker3", 2000, 3000, 10, setWorkerLabel),
|
||||
test.BuildTestNode("master1", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("master2", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("master3", 2000, 3000, 10, nil),
|
||||
},
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
{
|
||||
description: "Evict pods uniformly respecting node selector with zero target nodes",
|
||||
pods: []v1.Pod{
|
||||
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
|
||||
*test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
|
||||
*test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
|
||||
*test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
|
||||
*test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
|
||||
*test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
|
||||
*test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
|
||||
*test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
|
||||
*test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
|
||||
*test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("worker1", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("worker2", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("worker3", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("master1", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("master2", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("master3", 2000, 3000, 10, nil),
|
||||
},
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
},
|
||||
}
|
||||
|
||||
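// Editor's note on the "(5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions" comments in the
// test cases above: the vectors are per-node counts of duplicate replicas across the six
// nodes (worker1, worker2, worker3, master1, master2, master3). Masters and non-matching
// nodes are excluded by taints, affinity, or node selectors, so the 9 replicas can only be
// balanced over the three workers: a uniform spread is 9/3 = 3 per node, which means the
// node currently holding 5 has to give up 2 pods, hence the expected 2 evictions. In the
// "zero target nodes" case no node carries the worker label, so nothing can be rebalanced
// and 0 evictions are expected.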
	for _, testCase := range testCases {
@@ -400,11 +693,13 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
			})
			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				"v1",
				policyv1.SchemeGroupVersion.String(),
				false,
				testCase.maxPodsToEvictPerNode,
				testCase.nodes,
				false,
				false,
				false,
			)

			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)
@@ -64,7 +64,12 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
		excludedNamespaces = strategy.Params.Namespaces.Exclude
	}

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
	nodeFit := false
	if strategy.Params != nil {
		nodeFit = strategy.Params.NodeFit
	}

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

	for _, nodeAffinity := range strategy.Params.NodeAffinityType {
		klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
@@ -85,6 +90,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
			}),
			podutil.WithNamespaces(includedNamespaces),
			podutil.WithoutNamespaces(excludedNamespaces),
			podutil.WithLabelSelector(strategy.Params.LabelSelector),
		)
		if err != nil {
			klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))
@@ -104,5 +110,4 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
			klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
		}
	}
	klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
}
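// Illustration (not part of the diff): a minimal, hedged sketch of how the NodeFit
// parameter threaded through above could be enabled for this strategy. It mirrors the
// requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy fixture used in the
// test hunks below; the variable name here is illustrative, not taken from the patch.
nodeAffinityWithNodeFit := api.DeschedulerStrategy{
	Enabled: true,
	Params: &api.StrategyParameters{
		NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
		// With NodeFit enabled, a pod violating node affinity is only evicted when
		// some other node in the cluster could actually host it.
		NodeFit: true,
	},
}
// RemovePodsViolatingNodeAffinity(ctx, client, nodeAffinityWithNodeFit, nodes, podEvictor)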
@@ -20,7 +20,8 @@ import (
	"context"
	"testing"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
@@ -40,6 +41,16 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
		},
	}

	requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy := api.DeschedulerStrategy{
		Enabled: true,
		Params: &api.StrategyParameters{
			NodeAffinityType: []string{
				"requiredDuringSchedulingIgnoredDuringExecution",
			},
			NodeFit: true,
		},
	}

	nodeLabelKey := "kubernetes.io/desiredNode"
	nodeLabelValue := "yes"
	nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10, nil)
@@ -137,11 +148,19 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
		{
			description:             "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
			expectedEvictedPodCount: 0,
			strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
			strategy:                requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
			pods:                    addPodsToNode(nodeWithoutLabels),
			nodes:                   []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
			maxPodsToEvictPerNode:   0,
		},
		{
			description:             "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict",
			expectedEvictedPodCount: 0,
			strategy:                requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
			pods:                    addPodsToNode(nodeWithoutLabels),
			nodes:                   []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
			maxPodsToEvictPerNode:   1,
		},
	}

	for _, tc := range tests {
@@ -153,11 +172,13 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {

			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				"v1",
				policyv1.SchemeGroupVersion.String(),
				false,
				tc.maxPodsToEvictPerNode,
				tc.nodes,
				false,
				false,
				false,
			)

			RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
@@ -26,6 +26,7 @@ import (
	"sigs.k8s.io/descheduler/pkg/utils"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"
)
@@ -54,9 +55,13 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
	}

	var includedNamespaces, excludedNamespaces []string
	if strategy.Params != nil && strategy.Params.Namespaces != nil {
		includedNamespaces = strategy.Params.Namespaces.Include
		excludedNamespaces = strategy.Params.Namespaces.Exclude
	var labelSelector *metav1.LabelSelector
	if strategy.Params != nil {
		if strategy.Params.Namespaces != nil {
			includedNamespaces = strategy.Params.Namespaces.Include
			excludedNamespaces = strategy.Params.Namespaces.Exclude
		}
		labelSelector = strategy.Params.LabelSelector
	}

	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
@@ -65,7 +70,12 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
		return
	}

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
	nodeFit := false
	if strategy.Params != nil {
		nodeFit = strategy.Params.NodeFit
	}

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

	for _, node := range nodes {
		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
@@ -76,6 +86,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
			podutil.WithFilter(evictable.IsEvictable),
			podutil.WithNamespaces(includedNamespaces),
			podutil.WithoutNamespaces(excludedNamespaces),
			podutil.WithLabelSelector(labelSelector),
		)
		if err != nil {
			//no pods evicted as error encountered retrieving evictable Pods
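// Illustration (not part of the diff): a hedged sketch of a RemovePodsViolatingNodeTaints
// strategy using the two parameters wired in above, LabelSelector and NodeFit. The label
// key/value pair below is made up for the example.
taintStrategy := api.DeschedulerStrategy{
	Enabled: true,
	Params: &api.StrategyParameters{
		// Only pods matching this selector are considered for eviction.
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "example"},
		},
		// Skip eviction when no other node could schedule the pod.
		NodeFit: true,
	},
}
// RemovePodsViolatingNodeTaints(ctx, client, taintStrategy, nodes, podEvictor)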
@@ -5,7 +5,8 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@@ -50,6 +51,17 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
	node2 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
|
||||
|
||||
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"datacenter": "east",
|
||||
}
|
||||
})
|
||||
node4 := test.BuildTestNode("n4", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec = v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||
@@ -69,12 +81,15 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node2.Name, nil)
|
||||
p11 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
|
||||
p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
	p12 := test.BuildTestPod("p12", 100, 0, node2.Name, nil)
	p12.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
// The following 4 pods won't get evicted.
|
||||
// A Critical Pod.
|
||||
p7.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
p7.Spec.Priority = &priority
|
||||
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
// A daemonset.
|
||||
p8.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
@@ -97,13 +112,19 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
p3 = addTolerationToPod(p3, "testTaint", "test", 1)
|
||||
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1)
|
||||
|
||||
p12.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
nodes []*v1.Node
|
||||
pods []v1.Pod
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
maxPodsToEvictPerNode int
|
||||
expectedEvictedPodCount int
|
||||
nodeFit bool
|
||||
}{
|
||||
|
||||
{
|
||||
@@ -111,6 +132,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p1, *p2, *p3},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 1, //p2 gets evicted
|
||||
},
|
||||
@@ -119,6 +141,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p1, *p3, *p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 1, //p4 gets evicted
|
||||
},
|
||||
@@ -127,6 +150,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p1, *p5, *p6},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
maxPodsToEvictPerNode: 1,
|
||||
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
|
||||
},
|
||||
@@ -135,24 +159,66 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p7, *p8, *p9, *p10},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 0,
|
||||
expectedEvictedPodCount: 0, //nothing is evicted
|
||||
},
|
||||
{
|
||||
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
|
||||
pods: []v1.Pod{*p7, *p8, *p9, *p10},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: true,
|
||||
evictSystemCriticalPods: false,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 1,
|
||||
expectedEvictedPodCount: 1, //p9 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
|
||||
pods: []v1.Pod{*p7, *p8, *p10, *p11},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 1,
|
||||
expectedEvictedPodCount: 1, //p11 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
|
||||
pods: []v1.Pod{*p2, *p7, *p9, *p10},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedEvictedPodCount: 2, //p2 and p7 are evicted
|
||||
},
|
||||
		{
			description:             "Pod p2 doesn't tolerate taint on its node, but also doesn't tolerate taints on other nodes",
			pods:                    []v1.Pod{*p1, *p2, *p3},
			nodes:                   []*v1.Node{node1, node2},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
			maxPodsToEvictPerNode:   0,
			expectedEvictedPodCount: 0, //p2 is not evicted: it doesn't tolerate the taints on the other node either
			nodeFit:                 true,
		},
		{
			description:             "Pod p12 doesn't tolerate taint on its node, but other nodes don't match its selector",
			pods:                    []v1.Pod{*p1, *p3, *p12},
			nodes:                   []*v1.Node{node1, node3},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
			maxPodsToEvictPerNode:   0,
			expectedEvictedPodCount: 0, //p12 is not evicted: no other node matches its node selector
			nodeFit:                 true,
		},
		{
			description:             "Pod p2 doesn't tolerate taint on its node, but other nodes are unschedulable",
			pods:                    []v1.Pod{*p1, *p2, *p3},
			nodes:                   []*v1.Node{node1, node4},
			evictLocalStoragePods:   false,
			evictSystemCriticalPods: false,
			maxPodsToEvictPerNode:   0,
			expectedEvictedPodCount: 0, //p2 is not evicted: the only other node is unschedulable
			nodeFit:                 true,
		},
	}
|
||||
|
||||
@@ -166,14 +232,22 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
policyv1.SchemeGroupVersion.String(),
|
||||
false,
|
||||
tc.maxPodsToEvictPerNode,
|
||||
tc.nodes,
|
||||
tc.evictLocalStoragePods,
|
||||
tc.evictSystemCriticalPods,
|
||||
false,
|
||||
)
|
||||
|
||||
RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, podEvictor)
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: tc.nodeFit,
|
||||
},
|
||||
}
|
||||
|
||||
RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
|
||||
|
||||
@@ -0,0 +1,164 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodeutilization

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	"sigs.k8s.io/descheduler/pkg/utils"
)

// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its strategy.
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if err := validateNodeUtilizationParams(strategy.Params); err != nil {
		klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
		return
	}

	nodeFit := false
	if strategy.Params != nil {
		nodeFit = strategy.Params.NodeFit
	}

	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
	if err != nil {
		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
		return
	}

	thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
	targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
	if err := validateHighUtilizationStrategyConfig(thresholds, targetThresholds); err != nil {
		klog.ErrorS(err, "HighNodeUtilization config is not valid")
		return
	}
	targetThresholds = make(api.ResourceThresholds)

	setDefaultForThresholds(thresholds, targetThresholds)
	resourceNames := getResourceNames(targetThresholds)

	sourceNodes, highNodes := classifyNodes(
		getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames),
		func(node *v1.Node, usage NodeUsage) bool {
			return isNodeWithLowUtilization(usage)
		},
		func(node *v1.Node, usage NodeUsage) bool {
			if nodeutil.IsNodeUnschedulable(node) {
				klog.V(2).InfoS("Node is unschedulable", "node", klog.KObj(node))
				return false
			}
			return !isNodeWithLowUtilization(usage)
		})

	// log message in one line
	keysAndValues := []interface{}{
		"CPU", thresholds[v1.ResourceCPU],
		"Mem", thresholds[v1.ResourceMemory],
		"Pods", thresholds[v1.ResourcePods],
	}
	for name := range thresholds {
		if !isBasicResource(name) {
			keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
		}
	}

	klog.V(1).InfoS("Criteria for a node below target utilization", keysAndValues...)
	klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(sourceNodes))

	if len(sourceNodes) == 0 {
		klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
		return
	}
	if len(sourceNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
		klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
		return
	}
	if len(sourceNodes) == len(nodes) {
		klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
		return
	}
	if len(highNodes) == 0 {
		klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
		return
	}

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

	// stop if the total available usage has dropped to zero - no more pods can be scheduled
	continueEvictionCond := func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
		for name := range totalAvailableUsage {
			if totalAvailableUsage[name].CmpInt64(0) < 1 {
				return false
			}
		}

		return true
	}
	evictPodsFromSourceNodes(
		ctx,
		sourceNodes,
		highNodes,
		podEvictor,
		evictable.IsEvictable,
		resourceNames,
		"HighNodeUtilization",
		continueEvictionCond)

}

func validateHighUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
	if targetThresholds != nil {
		return fmt.Errorf("targetThresholds is not applicable for HighNodeUtilization")
	}
	if err := validateThresholds(thresholds); err != nil {
		return fmt.Errorf("thresholds config is not valid: %v", err)
	}
	return nil
}

func setDefaultForThresholds(thresholds, targetThresholds api.ResourceThresholds) {
	// check if Pods/CPU/Mem are set, if not, set them to 100
	if _, ok := thresholds[v1.ResourcePods]; !ok {
		thresholds[v1.ResourcePods] = MaxResourcePercentage
	}
	if _, ok := thresholds[v1.ResourceCPU]; !ok {
		thresholds[v1.ResourceCPU] = MaxResourcePercentage
	}
	if _, ok := thresholds[v1.ResourceMemory]; !ok {
		thresholds[v1.ResourceMemory] = MaxResourcePercentage
	}

	// Default targetThreshold resource values to 100
	targetThresholds[v1.ResourcePods] = MaxResourcePercentage
	targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
	targetThresholds[v1.ResourceMemory] = MaxResourcePercentage

	for name := range thresholds {
		if !isBasicResource(name) {
			targetThresholds[name] = MaxResourcePercentage
		}
	}
}
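// Illustration (not part of the diff): a hedged sketch of invoking the new
// HighNodeUtilization strategy. It mirrors the strategy construction used in the test
// file below; the 20% CPU threshold is an arbitrary example value. Nodes whose requested
// CPU stays below the threshold are treated as underutilized and drained so the scheduler
// can compact the workload onto the remaining nodes.
highUtilStrategy := api.DeschedulerStrategy{
	Enabled: true,
	Params: &api.StrategyParameters{
		NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
			// Only Thresholds is accepted here; TargetThresholds is rejected by
			// validateHighUtilizationStrategyConfig above.
			Thresholds: api.ResourceThresholds{
				v1.ResourceCPU: 20,
			},
		},
		NodeFit: true,
	},
}
// HighNodeUtilization(ctx, client, highUtilStrategy, nodes, podEvictor)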
@@ -0,0 +1,784 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func TestHighNodeUtilization(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
n1NodeName := "n1"
|
||||
n2NodeName := "n2"
|
||||
n3NodeName := "n3"
|
||||
|
||||
nodeSelectorKey := "datacenter"
|
||||
nodeSelectorValue := "west"
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
nodes map[string]*v1.Node
|
||||
pods map[string]*v1.PodList
|
||||
maxPodsToEvictPerNode int
|
||||
expectedPodsEvicted int
|
||||
evictedPods []string
|
||||
}{
|
||||
{
|
||||
name: "no node below threshold usage",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "no evictable pods",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
v1.ResourcePods: 40,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetDSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "no node to schedule evicted pods",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These can't be evicted.
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These can't be evicted.
|
||||
*test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p3", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 2,
|
||||
evictedPods: []string{"p1", "p7"},
|
||||
},
|
||||
{
|
||||
name: "without priorities stop when resource capacity is depleted",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 2000, 3000, 10, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 1,
|
||||
},
|
||||
{
|
||||
name: "with priorities",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodPriority(pod, lowPriority)
|
||||
}),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p7", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p8", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetDSOwnerRef),
|
||||
},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 1,
|
||||
evictedPods: []string{"p1"},
|
||||
},
|
||||
{
|
||||
name: "without priorities evicting best-effort pods only",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 3000, 3000, 10, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 3000, 3000, 5, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 3000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 1,
|
||||
evictedPods: []string{"p1"},
|
||||
},
|
||||
{
|
||||
name: "with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
extendedResource: 40,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 8)
|
||||
}),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 8)
|
||||
}),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
// These won't be evicted
|
||||
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetDSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 2,
|
||||
evictedPods: []string{"p1", "p2"},
|
||||
},
|
||||
{
|
||||
name: "with extended resource in some of nodes",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
extendedResource: 40,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 8)
|
||||
}),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
//These won't be evicted
|
||||
*test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 500, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 500, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p6", 500, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {
|
||||
Items: []v1.Pod{},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "Other node match pod node selector",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeSelectorKey: nodeSelectorValue,
|
||||
}
|
||||
}),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
// A pod selecting nodes in the "west" datacenter
|
||||
test.SetRSOwnerRef(pod)
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeSelectorKey: nodeSelectorValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 1,
|
||||
},
|
||||
{
|
||||
name: "Other node does not match pod node selector",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
// A pod selecting nodes in the "west" datacenter
|
||||
test.SetRSOwnerRef(pod)
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeSelectorKey: nodeSelectorValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
list := action.(core.ListAction)
|
||||
fieldString := list.GetListRestrictions().Fields.String()
|
||||
if strings.Contains(fieldString, n1NodeName) {
|
||||
return true, test.pods[n1NodeName], nil
|
||||
}
|
||||
if strings.Contains(fieldString, n2NodeName) {
|
||||
return true, test.pods[n2NodeName], nil
|
||||
}
|
||||
if strings.Contains(fieldString, n3NodeName) {
|
||||
return true, test.pods[n3NodeName], nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.GetAction)
|
||||
if node, exists := test.nodes[getAction.GetName()]; exists {
|
||||
return true, node, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||
})
|
||||
podsForEviction := make(map[string]struct{})
|
||||
for _, pod := range test.evictedPods {
|
||||
podsForEviction[pod] = struct{}{}
|
||||
}
|
||||
|
||||
evictionFailed := false
|
||||
if len(test.evictedPods) > 0 {
|
||||
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.CreateAction)
|
||||
obj := getAction.GetObject()
|
||||
if eviction, ok := obj.(*v1beta1.Eviction); ok {
|
||||
if _, exists := podsForEviction[eviction.Name]; exists {
|
||||
return true, obj, nil
|
||||
}
|
||||
evictionFailed = true
|
||||
return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
|
||||
}
|
||||
return true, obj, nil
|
||||
})
|
||||
}
|
||||
|
||||
var nodes []*v1.Node
|
||||
for _, node := range test.nodes {
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
test.maxPodsToEvictPerNode,
|
||||
nodes,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: test.thresholds,
|
||||
},
|
||||
NodeFit: true,
|
||||
},
|
||||
}
|
||||
HighNodeUtilization(ctx, fakeClient, strategy, nodes, podEvictor)
|
||||
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if test.expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
|
||||
}
|
||||
if evictionFailed {
|
||||
t.Errorf("Pod evictions failed unexpectedly")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHighNodeUtilizationStrategyConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
targetThresholds api.ResourceThresholds
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing target thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("targetThresholds is not applicable for HighNodeUtilization"),
|
||||
},
|
||||
{
|
||||
name: "passing empty thresholds",
|
||||
thresholds: api.ResourceThresholds{},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing invalid thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
|
||||
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
|
||||
},
|
||||
{
|
||||
name: "passing valid strategy config",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing valid strategy config with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
validateErr := validateHighUtilizationStrategyConfig(testCase.thresholds, testCase.targetThresholds)
|
||||
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of strategy config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 1000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 1000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 1000, 3000, 10, nil)
|
||||
n3withTaints := n3.DeepCopy()
|
||||
n3withTaints.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "key",
|
||||
Value: "value",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
|
||||
podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, test.SetRSOwnerRef)
|
||||
podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
|
||||
{
|
||||
Key: "key",
|
||||
Value: "value",
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes []*v1.Node
|
||||
pods []*v1.Pod
|
||||
evictionsExpected int
|
||||
}{
|
||||
{
|
||||
name: "No taints",
|
||||
nodes: []*v1.Node{n1, n2, n3},
|
||||
pods: []*v1.Pod{
|
||||
//Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
// Node 2 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n2.Name), 200, 0, n2.Name, test.SetRSOwnerRef),
|
||||
},
|
||||
evictionsExpected: 1,
|
||||
},
|
||||
{
|
||||
name: "No pod tolerates node taint",
|
||||
nodes: []*v1.Node{n1, n3withTaints},
|
||||
pods: []*v1.Pod{
|
||||
//Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
// Node 3 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
|
||||
},
|
||||
evictionsExpected: 0,
|
||||
},
|
||||
{
|
||||
name: "Pod which tolerates node taint",
|
||||
nodes: []*v1.Node{n1, n3withTaints},
|
||||
pods: []*v1.Pod{
|
||||
//Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 100, 0, n1.Name, test.SetRSOwnerRef),
|
||||
podThatToleratesTaint,
|
||||
// Node 3 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 500, 0, n3withTaints.Name, test.SetRSOwnerRef),
|
||||
},
|
||||
evictionsExpected: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, item := range tests {
|
||||
t.Run(item.name, func(t *testing.T) {
|
||||
var objs []runtime.Object
|
||||
for _, node := range item.nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
|
||||
for _, pod := range item.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"policy/v1",
|
||||
false,
|
||||
item.evictionsExpected,
|
||||
item.nodes,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor)
|
||||
|
||||
if item.evictionsExpected != podEvictor.TotalEvicted() {
|
||||
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
187	pkg/descheduler/strategies/nodeutilization/lownodeutilization.go	Normal file
@@ -0,0 +1,187 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
|
||||
// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
|
||||
if err := validateNodeUtilizationParams(strategy.Params); err != nil {
|
||||
klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
|
||||
return
|
||||
}
|
||||
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
||||
return
|
||||
}
|
||||
|
||||
nodeFit := false
|
||||
if strategy.Params != nil {
|
||||
nodeFit = strategy.Params.NodeFit
|
||||
}
|
||||
|
||||
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
|
||||
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
|
||||
if err := validateLowUtilizationStrategyConfig(thresholds, targetThresholds); err != nil {
|
||||
klog.ErrorS(err, "LowNodeUtilization config is not valid")
|
||||
return
|
||||
}
|
||||
// check if Pods/CPU/Mem are set, if not, set them to 100
|
||||
if _, ok := thresholds[v1.ResourcePods]; !ok {
|
||||
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceCPU]; !ok {
|
||||
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceMemory]; !ok {
|
||||
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
}
|
||||
resourceNames := getResourceNames(thresholds)
|
||||
|
||||
lowNodes, sourceNodes := classifyNodes(
|
||||
getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames),
|
||||
// The node has to be schedulable (to be able to move workload there)
|
||||
func(node *v1.Node, usage NodeUsage) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
|
||||
return false
|
||||
}
|
||||
return isNodeWithLowUtilization(usage)
|
||||
},
|
||||
func(node *v1.Node, usage NodeUsage) bool {
|
||||
return isNodeAboveTargetUtilization(usage)
|
||||
},
|
||||
)

// log message in one line
keysAndValues := []interface{}{
"CPU", thresholds[v1.ResourceCPU],
"Mem", thresholds[v1.ResourceMemory],
"Pods", thresholds[v1.ResourcePods],
}
for name := range thresholds {
if !isBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
}
}
klog.V(1).InfoS("Criteria for a node under utilization", keysAndValues...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))

// log message in one line
keysAndValues = []interface{}{
"CPU", targetThresholds[v1.ResourceCPU],
"Mem", targetThresholds[v1.ResourceMemory],
"Pods", targetThresholds[v1.ResourcePods],
}
for name := range targetThresholds {
if !isBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), int64(targetThresholds[name]))
}
}
klog.V(1).InfoS("Criteria for a node above target utilization", keysAndValues...)
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))

if len(lowNodes) == 0 {
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
return
}

if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
return
}

if len(lowNodes) == len(nodes) {
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
return
}

if len(sourceNodes) == 0 {
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
return
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

// stop if node utilization drops below the target threshold or all of the available capacity for any required resource (cpu, memory, pods) has been moved
continueEvictionCond := func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
if !isNodeAboveTargetUtilization(nodeUsage) {
return false
}
for name := range totalAvailableUsage {
if totalAvailableUsage[name].CmpInt64(0) < 1 {
return false
}
}

return true
}
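// Sketch of how the condition above plays out (illustrative numbers): if a source node
// sits at 60% CPU with a 50% target threshold, eviction keeps going; once enough pods
// have been evicted that the node drops to or below 50%, or once any entry in
// totalAvailableUsage (the spare capacity left on the underutilized nodes) reaches
// zero, the condition returns false and the remaining pods on that node are left alone.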

evictPodsFromSourceNodes(
ctx,
sourceNodes,
lowNodes,
podEvictor,
evictable.IsEvictable,
resourceNames,
"LowNodeUtilization",
continueEvictionCond)

klog.V(1).InfoS("Total number of pods evicted", "evictedPods", podEvictor.TotalEvicted())
}

// validateLowUtilizationStrategyConfig checks if the strategy's config is valid
func validateLowUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
// validate thresholds and targetThresholds config
if err := validateThresholds(thresholds); err != nil {
return fmt.Errorf("thresholds config is not valid: %v", err)
}
if err := validateThresholds(targetThresholds); err != nil {
return fmt.Errorf("targetThresholds config is not valid: %v", err)
}

// validate if thresholds and targetThresholds have same resources configured
if len(thresholds) != len(targetThresholds) {
return fmt.Errorf("thresholds and targetThresholds configured different resources")
}
for resourceName, value := range thresholds {
if targetValue, ok := targetThresholds[resourceName]; !ok {
return fmt.Errorf("thresholds and targetThresholds configured different resources")
} else if value > targetValue {
return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
}
}
return nil
}
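// Illustrative (hypothetical) inputs for the validator above:
//
//	// valid: same resources on both sides, each threshold <= its targetThreshold
//	validateLowUtilizationStrategyConfig(
//		api.ResourceThresholds{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
//		api.ResourceThresholds{v1.ResourceCPU: 50, v1.ResourceMemory: 50})
//
//	// invalid: the two maps configure different resources
//	validateLowUtilizationStrategyConfig(
//		api.ResourceThresholds{v1.ResourceCPU: 20},
//		api.ResourceThresholds{v1.ResourceCPU: 50, v1.ResourceMemory: 50})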
|
||||
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategies
|
||||
package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
"k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -34,17 +35,16 @@ import (
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
var (
|
||||
lowPriority = int32(0)
|
||||
highPriority = int32(10000)
|
||||
)
|
||||
|
||||
func TestLowNodeUtilization(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
n1NodeName := "n1"
|
||||
n2NodeName := "n2"
|
||||
n3NodeName := "n3"
|
||||
|
||||
nodeSelectorKey := "datacenter"
|
||||
nodeSelectorValue := "west"
|
||||
notMatchingNodeSelectorValue := "east"
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
thresholds, targetThresholds api.ResourceThresholds
|
||||
@@ -103,7 +103,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
@@ -162,7 +162,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
@@ -300,7 +300,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
@@ -376,7 +376,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
@@ -385,6 +385,353 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
expectedPodsEvicted: 4,
|
||||
evictedPods: []string{"p1", "p2", "p4", "p5"},
|
||||
},
|
||||
{
|
||||
name: "with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 30,
|
||||
extendedResource: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 50,
|
||||
extendedResource: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 8)
|
||||
}),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 8)
|
||||
}),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with extended resource.
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p2", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p3", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p4", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p5", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
*test.BuildTestPod("p6", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
|
||||
*test.BuildTestPod("p7", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p8", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 0, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before extended resource is depleted
|
||||
expectedPodsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "with extended resource in some of nodes",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 30,
|
||||
extendedResource: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 50,
|
||||
extendedResource: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 8)
|
||||
}),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with extended resource.
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
// 0 pods available for eviction because there's not enough extended resource in node2
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node is unschedulable",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node doesn't match pod node selector for p4 and p5",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeSelectorKey: notMatchingNodeSelectorValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod selecting nodes in the "west" datacenter
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeSelectorKey: nodeSelectorValue,
|
||||
}
|
||||
}),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod selecting nodes in the "west" datacenter
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeSelectorKey: nodeSelectorValue,
|
||||
}
|
||||
}),
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node doesn't match pod node affinity for p4 and p5",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeSelectorKey: notMatchingNodeSelectorValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with affinity to run in the "west" datacenter upon scheduling
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeSelectorKey,
|
||||
Operator: "In",
|
||||
Values: []string{nodeSelectorValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with affinity to run in the "west" datacenter upon scheduling
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeSelectorKey,
|
||||
Operator: "In",
|
||||
Values: []string{nodeSelectorValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
}},
|
||||
},
|
||||
maxPodsToEvictPerNode: 0,
|
||||
expectedPodsEvicted: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
@@ -439,11 +786,13 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
policyv1.SchemeGroupVersion.String(),
|
||||
false,
|
||||
test.maxPodsToEvictPerNode,
|
||||
nodes,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
strategy := api.DeschedulerStrategy{
|
||||
@@ -453,6 +802,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
Thresholds: test.thresholds,
|
||||
TargetThresholds: test.targetThresholds,
|
||||
},
|
||||
NodeFit: true,
|
||||
},
|
||||
}
|
||||
LowNodeUtilization(ctx, fakeClient, strategy, nodes, podEvictor)
|
||||
@@ -468,7 +818,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateStrategyConfig(t *testing.T) {
|
||||
func TestValidateLowNodeUtilizationStrategyConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
@@ -488,19 +838,6 @@ func TestValidateStrategyConfig(t *testing.T) {
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
|
||||
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
|
||||
},
|
||||
{
|
||||
name: "passing invalid targetThresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
"resourceInvalid": 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("targetThresholds config is not valid: %v",
|
||||
fmt.Errorf("only cpu, memory, or pods thresholds can be specified")),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different num of resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
@@ -538,6 +875,60 @@ func TestValidateStrategyConfig(t *testing.T) {
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
|
||||
},
|
||||
{
|
||||
name: "only thresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "only targetThresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different extended resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
"example.com/bar": 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' extended resource config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 90,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 20,
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
|
||||
},
|
||||
{
|
||||
name: "passing valid strategy config",
|
||||
thresholds: api.ResourceThresholds{
|
||||
@@ -550,10 +941,24 @@ func TestValidateStrategyConfig(t *testing.T) {
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing valid strategy config with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
validateErr := validateStrategyConfig(testCase.thresholds, testCase.targetThresholds)
|
||||
validateErr := validateLowUtilizationStrategyConfig(testCase.thresholds, testCase.targetThresholds)
|
||||
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
@@ -567,86 +972,7 @@ func TestValidateStrategyConfig(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateThresholds(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input api.ResourceThresholds
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing nil map for threshold",
|
||||
input: nil,
|
||||
errInfo: fmt.Errorf("no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing no threshold",
|
||||
input: api.ResourceThresholds{},
|
||||
errInfo: fmt.Errorf("no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing unsupported resource name",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
v1.ResourceStorage: 25.5,
|
||||
},
|
||||
errInfo: fmt.Errorf("only cpu, memory, or pods thresholds can be specified"),
|
||||
},
|
||||
{
|
||||
name: "passing invalid resource name",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
"coolResource": 42.0,
|
||||
},
|
||||
errInfo: fmt.Errorf("only cpu, memory, or pods thresholds can be specified"),
|
||||
},
|
||||
{
|
||||
name: "passing invalid resource value",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 110,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("%v threshold not in [%v, %v] range", v1.ResourceCPU, MinResourcePercentage, MaxResourcePercentage),
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with max and min resource value",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 100,
|
||||
v1.ResourceMemory: 0,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with only cpu",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with cpu, memory and pods",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 30,
|
||||
v1.ResourcePods: 40,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
validateErr := validateThresholds(test.input)
|
||||
|
||||
if validateErr == nil || test.errInfo == nil {
|
||||
if validateErr != test.errInfo {
|
||||
t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != test.errInfo.Error() {
|
||||
t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithTaints(t *testing.T) {
|
||||
func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
@@ -659,6 +985,7 @@ func TestWithTaints(t *testing.T) {
|
||||
v1.ResourcePods: 70,
|
||||
},
|
||||
},
|
||||
NodeFit: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -759,11 +1086,13 @@ func TestWithTaints(t *testing.T) {
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"policy/v1",
|
||||
policyv1.SchemeGroupVersion.String(),
|
||||
false,
|
||||
item.evictionsExpected,
|
||||
item.nodes,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor)
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@@ -14,23 +14,20 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategies
package nodeutilization

import (
"context"
"fmt"
"sort"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"sort"
)

// NodeUsage stores a node's info, pods on it, thresholds and its resource usage
@@ -43,6 +40,8 @@ type NodeUsage struct {
highResourceThreshold map[v1.ResourceName]*resource.Quantity
}

type continueEvictionCond func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool
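// A continueEvictionCond is just a predicate over the current node usage and the
// remaining movable capacity. A hypothetical caller could, for example, stop as soon
// as any tracked resource budget is exhausted:
//
//	stopWhenDrained := func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
//		for _, q := range totalAvailableUsage {
//			if q.CmpInt64(0) < 1 {
//				return false
//			}
//		}
//		return true
//	}
//
// LowNodeUtilization builds a similar closure that additionally checks
// isNodeAboveTargetUtilization before evicting more pods.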

// NodePodsMap is a set of (node, pods) pairs
type NodePodsMap map[*v1.Node][]*v1.Pod

@@ -53,7 +52,7 @@ const (
MaxResourcePercentage = 100
)

func validateLowNodeUtilizationParams(params *api.StrategyParameters) error {
func validateNodeUtilizationParams(params *api.StrategyParameters) error {
if params == nil || params.NodeResourceUtilizationThresholds == nil {
return fmt.Errorf("NodeResourceUtilizationThresholds not set")
}
@@ -64,132 +63,14 @@ func validateLowNodeUtilizationParams(params *api.StrategyParameters) error {
return nil
}
|
||||
|
||||
// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
|
||||
// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
|
||||
if err := validateLowNodeUtilizationParams(strategy.Params); err != nil {
|
||||
klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
|
||||
return
|
||||
}
|
||||
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
||||
return
|
||||
}
|
||||
|
||||
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
|
||||
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
|
||||
if err := validateStrategyConfig(thresholds, targetThresholds); err != nil {
|
||||
klog.ErrorS(err, "LowNodeUtilization config is not valid")
|
||||
return
|
||||
}
|
||||
// check if Pods/CPU/Mem are set, if not, set them to 100
|
||||
if _, ok := thresholds[v1.ResourcePods]; !ok {
|
||||
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceCPU]; !ok {
|
||||
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceMemory]; !ok {
|
||||
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
}
|
||||
|
||||
lowNodes, targetNodes := classifyNodes(
|
||||
getNodeUsage(ctx, client, nodes, thresholds, targetThresholds),
|
||||
// The node has to be schedulable (to be able to move workload there)
|
||||
func(node *v1.Node, usage NodeUsage) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
|
||||
return false
|
||||
}
|
||||
return isNodeWithLowUtilization(usage)
|
||||
},
|
||||
func(node *v1.Node, usage NodeUsage) bool {
|
||||
return isNodeAboveTargetUtilization(usage)
|
||||
},
|
||||
)
|
||||
|
||||
klog.V(1).InfoS("Criteria for a node under utilization",
|
||||
"CPU", thresholds[v1.ResourceCPU], "Mem", thresholds[v1.ResourceMemory], "Pods", thresholds[v1.ResourcePods])
|
||||
|
||||
if len(lowNodes) == 0 {
|
||||
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
|
||||
return
|
||||
}
|
||||
klog.V(1).InfoS("Total number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
|
||||
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
|
||||
klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
|
||||
return
|
||||
}
|
||||
|
||||
if len(lowNodes) == len(nodes) {
|
||||
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
|
||||
return
|
||||
}
|
||||
|
||||
if len(targetNodes) == 0 {
|
||||
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
|
||||
return
|
||||
}
|
||||
|
||||
klog.V(1).InfoS("Criteria for a node above target utilization",
|
||||
"CPU", targetThresholds[v1.ResourceCPU], "Mem", targetThresholds[v1.ResourceMemory], "Pods", targetThresholds[v1.ResourcePods])
|
||||
|
||||
klog.V(1).InfoS("Number of nodes above target utilization", "totalNumber", len(targetNodes))
|
||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
|
||||
|
||||
evictPodsFromTargetNodes(
|
||||
ctx,
|
||||
targetNodes,
|
||||
lowNodes,
|
||||
podEvictor,
|
||||
evictable.IsEvictable)
|
||||
|
||||
klog.V(1).InfoS("Total number of pods evicted", "evictedPods", podEvictor.TotalEvicted())
|
||||
}
|
||||
|
||||
// validateStrategyConfig checks if the strategy's config is valid
|
||||
func validateStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
|
||||
// validate thresholds and targetThresholds config
|
||||
if err := validateThresholds(thresholds); err != nil {
|
||||
return fmt.Errorf("thresholds config is not valid: %v", err)
|
||||
}
|
||||
if err := validateThresholds(targetThresholds); err != nil {
|
||||
return fmt.Errorf("targetThresholds config is not valid: %v", err)
|
||||
}
|
||||
|
||||
// validate if thresholds and targetThresholds have same resources configured
|
||||
if len(thresholds) != len(targetThresholds) {
|
||||
return fmt.Errorf("thresholds and targetThresholds configured different resources")
|
||||
}
|
||||
for resourceName, value := range thresholds {
|
||||
if targetValue, ok := targetThresholds[resourceName]; !ok {
|
||||
return fmt.Errorf("thresholds and targetThresholds configured different resources")
|
||||
} else if value > targetValue {
|
||||
return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateThresholds checks if thresholds have valid resource name and resource percentage configured
func validateThresholds(thresholds api.ResourceThresholds) error {
if thresholds == nil || len(thresholds) == 0 {
return fmt.Errorf("no resource threshold is configured")
}
for name, percent := range thresholds {
switch name {
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
if percent < MinResourcePercentage || percent > MaxResourcePercentage {
return fmt.Errorf("%v threshold not in [%v, %v] range", name, MinResourcePercentage, MaxResourcePercentage)
}
default:
return fmt.Errorf("only cpu, memory, or pods thresholds can be specified")
if percent < MinResourcePercentage || percent > MaxResourcePercentage {
return fmt.Errorf("%v threshold not in [%v, %v] range", name, MinResourcePercentage, MaxResourcePercentage)
}
}
return nil
@@ -200,8 +81,9 @@ func getNodeUsage(
client clientset.Interface,
nodes []*v1.Node,
lowThreshold, highThreshold api.ResourceThresholds,
resourceNames []v1.ResourceName,
) []NodeUsage {
nodeUsageList := []NodeUsage{}
var nodeUsageList []NodeUsage

for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(ctx, client, node)
@@ -210,28 +92,42 @@ func getNodeUsage(
continue
}

// A threshold is in percentages but in <0;100> interval.
// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
// Multiplying it with capacity will give fraction of the capacity corresponding to the given high/low resource threshold in Quantity units.
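// Worked example (illustrative numbers): for a node with 2000m allocatable CPU and a
// low CPU threshold of 30, the computation below yields 2000m * 30 * 0.01 = 600m, so
// the node counts as underutilized on CPU only while its summed CPU requests stay
// below 600m.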
nodeCapacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
nodeCapacity = node.Status.Allocatable
}
lowResourceThreshold := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(lowThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
}
for _, name := range resourceNames {
if !isBasicResource(name) {
cap := nodeCapacity[name]
lowResourceThreshold[name] = resource.NewQuantity(int64(float64(lowThreshold[name])*float64(cap.Value())*0.01), resource.DecimalSI)
}
}
highResourceThreshold := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(highThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(int64(float64(highThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(float64(highThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
}
for _, name := range resourceNames {
if !isBasicResource(name) {
cap := nodeCapacity[name]
highResourceThreshold[name] = resource.NewQuantity(int64(float64(highThreshold[name])*float64(cap.Value())*0.01), resource.DecimalSI)
}
}

nodeUsageList = append(nodeUsageList, NodeUsage{
node: node,
usage: nodeUtilization(node, pods),
allPods: pods,
// A threshold is in percentages but in <0;100> interval.
// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
// Multiplying it with capacity will give fraction of the capacity corresponding to the given high/low resource threshold in Quantity units.
lowResourceThreshold: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(lowThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
},
highResourceThreshold: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(highThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(int64(float64(highThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(float64(highThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
},
node: node,
usage: nodeUtilization(node, pods, resourceNames),
allPods: pods,
lowResourceThreshold: lowResourceThreshold,
highResourceThreshold: highResourceThreshold,
})
}
@@ -248,7 +144,7 @@ func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
for resourceName, resourceUsage := range nodeUsage.usage {
cap := nodeCapacity[resourceName]
if !cap.IsZero() {
resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.Value()) / float64(cap.Value())
resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.MilliValue()) / float64(cap.MilliValue())
}
}
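// Note on the change above: switching from Value() to MilliValue() avoids truncating
// sub-unit quantities. With Value(), a CPU usage of 200m against a 2-core capacity
// would be rounded to whole cores before dividing and yield a distorted percentage;
// computing in milli-units preserves the true ratio (200 / 2000 = 10%) while leaving
// memory and pod percentages effectively unchanged.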
@@ -278,43 +174,55 @@ func classifyNodes(
return lowNodes, highNodes
}

// evictPodsFromTargetNodes evicts pods based on priority, if all the pods on the node have priority, if not
// evictPodsFromSourceNodes evicts pods based on priority, if all the pods on the node have priority, if not
// evicts them based on QoS as fallback option.
// TODO: @ravig Break this function into smaller functions.
func evictPodsFromTargetNodes(
func evictPodsFromSourceNodes(
ctx context.Context,
targetNodes, lowNodes []NodeUsage,
sourceNodes, destinationNodes []NodeUsage,
podEvictor *evictions.PodEvictor,
podFilter func(pod *v1.Pod) bool,
resourceNames []v1.ResourceName,
strategy string,
continueEviction continueEvictionCond,
) {

sortNodesByUsage(targetNodes)
sortNodesByUsage(sourceNodes)

// upper bound on total number of pods/cpu/memory to be moved
// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{
v1.ResourcePods: {},
v1.ResourceCPU: {},
v1.ResourceMemory: {},
}

var taintsOfLowNodes = make(map[string][]v1.Taint, len(lowNodes))
for _, node := range lowNodes {
taintsOfLowNodes[node.node.Name] = node.node.Spec.Taints
var taintsOfDestinationNodes = make(map[string][]v1.Taint, len(destinationNodes))
for _, node := range destinationNodes {
taintsOfDestinationNodes[node.node.Name] = node.node.Spec.Taints

for name := range totalAvailableUsage {
for _, name := range resourceNames {
if _, ok := totalAvailableUsage[name]; !ok {
totalAvailableUsage[name] = resource.NewQuantity(0, resource.DecimalSI)
}
totalAvailableUsage[name].Add(*node.highResourceThreshold[name])
totalAvailableUsage[name].Sub(*node.usage[name])
}
}

klog.V(1).InfoS(
"Total capacity to be moved",
// log message in one line
keysAndValues := []interface{}{
"CPU", totalAvailableUsage[v1.ResourceCPU].MilliValue(),
"Mem", totalAvailableUsage[v1.ResourceMemory].Value(),
"Pods", totalAvailableUsage[v1.ResourcePods].Value(),
)
}
for name := range totalAvailableUsage {
if !isBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
}
}
klog.V(1).InfoS("Total capacity to be moved", keysAndValues...)

for _, node := range targetNodes {
for _, node := range sourceNodes {
klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)

nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
@@ -328,7 +236,7 @@ func evictPodsFromTargetNodes(
klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
evictPods(ctx, removablePods, node, totalAvailableUsage, taintsOfLowNodes, podEvictor)
evictPods(ctx, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, strategy, continueEviction)
klog.V(1).InfoS("Evicted pods from node", "node", klog.KObj(node.node), "evictedPods", podEvictor.NodeEvicted(node.node), "usage", node.usage)
}
}
@@ -340,33 +248,18 @@ func evictPods(
totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
taintsOfLowNodes map[string][]v1.Taint,
podEvictor *evictions.PodEvictor,
strategy string,
continueEviction continueEvictionCond,
) {
// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
continueCond := func() bool {
if !isNodeAboveTargetUtilization(nodeUsage) {
return false
}
if totalAvailableUsage[v1.ResourcePods].CmpInt64(0) < 1 {
return false
}
if totalAvailableUsage[v1.ResourceCPU].CmpInt64(0) < 1 {
return false
}
if totalAvailableUsage[v1.ResourceMemory].CmpInt64(0) < 1 {
return false
}
return true
}

if continueCond() {
if continueEviction(nodeUsage, totalAvailableUsage) {
for _, pod := range inputPods {
if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod))

continue
}

success, err := podEvictor.EvictPod(ctx, pod, nodeUsage.node, "LowNodeUtilization")
success, err := podEvictor.EvictPod(ctx, pod, nodeUsage.node, strategy)
if err != nil {
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
break
@@ -375,20 +268,32 @@ func evictPods(
if success {
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod), "err", err)

cpuQuantity := utils.GetResourceRequestQuantity(pod, v1.ResourceCPU)
nodeUsage.usage[v1.ResourceCPU].Sub(cpuQuantity)
totalAvailableUsage[v1.ResourceCPU].Sub(cpuQuantity)
for name := range totalAvailableUsage {
if name == v1.ResourcePods {
nodeUsage.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
} else {
quantity := utils.GetResourceRequestQuantity(pod, name)
nodeUsage.usage[name].Sub(quantity)
totalAvailableUsage[name].Sub(quantity)
}
}
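// Bookkeeping sketch: after each successful eviction the loop above subtracts the
// evicted pod's request for every tracked resource (and a flat 1 from the pods count)
// from both the source node's usage and the shared totalAvailableUsage budget, so the
// continueEviction check below always sees up-to-date numbers. For example, evicting
// a pod that requests 400m CPU lowers the cpu entry in both maps by 400m.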

memoryQuantity := utils.GetResourceRequestQuantity(pod, v1.ResourceMemory)
nodeUsage.usage[v1.ResourceMemory].Sub(memoryQuantity)
totalAvailableUsage[v1.ResourceMemory].Sub(memoryQuantity)
keysAndValues := []interface{}{
"node", nodeUsage.node.Name,
"CPU", nodeUsage.usage[v1.ResourceCPU].MilliValue(),
"Mem", nodeUsage.usage[v1.ResourceMemory].Value(),
"Pods", nodeUsage.usage[v1.ResourcePods].Value(),
}
for name := range totalAvailableUsage {
if !isBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
}
}

nodeUsage.usage[v1.ResourcePods].Sub(*resource.NewQuantity(1, resource.DecimalSI))
totalAvailableUsage[v1.ResourcePods].Sub(*resource.NewQuantity(1, resource.DecimalSI))

klog.V(3).InfoS("Updated node usage", "updatedUsage", nodeUsage)
// check if node utilization drops below target threshold or any required capacity (cpu, memory, pods) is moved
if !continueCond() {
klog.V(3).InfoS("Updated node usage", keysAndValues...)
// check if pods can still be evicted
if !continueEviction(nodeUsage, totalAvailableUsage) {
break
}
}
@@ -401,6 +306,15 @@ func sortNodesByUsage(nodes []NodeUsage) {
sort.Slice(nodes, func(i, j int) bool {
ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
tj := nodes[j].usage[v1.ResourceMemory].Value() + nodes[j].usage[v1.ResourceCPU].MilliValue() + nodes[j].usage[v1.ResourcePods].Value()

// extended resources
for name := range nodes[i].usage {
if !isBasicResource(name) {
ti = ti + nodes[i].usage[name].Value()
tj = tj + nodes[j].usage[name].Value()
}
}

// To return sorted in descending order
return ti > tj
})
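// The ordering above is a deliberately crude heuristic: it sums memory bytes, CPU
// millicores, pod counts and extended-resource units even though the units differ,
// which in practice lets the much larger memory numbers dominate. The intent appears
// to be a rough "most loaded first" ordering of source nodes rather than a precise
// utilization score.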
@@ -431,16 +345,42 @@ func isNodeWithLowUtilization(usage NodeUsage) bool {
return true
}

func nodeUtilization(node *v1.Node, pods []*v1.Pod) map[v1.ResourceName]*resource.Quantity {
// getResourceNames returns list of resource names in resource thresholds
func getResourceNames(thresholds api.ResourceThresholds) []v1.ResourceName {
resourceNames := make([]v1.ResourceName, 0, len(thresholds))
for name := range thresholds {
resourceNames = append(resourceNames, name)
}
return resourceNames
}

// isBasicResource checks if resource is basic native.
func isBasicResource(name v1.ResourceName) bool {
switch name {
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
return true
default:
return false
}
}

func nodeUtilization(node *v1.Node, pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
totalReqs := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
}
for _, name := range resourceNames {
if !isBasicResource(name) {
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
}
}

for _, pod := range pods {
req, _ := utils.PodRequestsAndLimits(pod)
for name, quantity := range req {
if name == v1.ResourceCPU || name == v1.ResourceMemory {
for _, name := range resourceNames {
quantity, ok := req[name]
if ok && name != v1.ResourcePods {
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
totalReqs[name].Add(quantity)
|
||||
@@ -0,0 +1,158 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeutilization
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"math"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
lowPriority = int32(0)
|
||||
highPriority = int32(10000)
|
||||
extendedResource = v1.ResourceName("example.com/foo")
|
||||
)
|
||||
|
||||
func TestValidateThresholds(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input api.ResourceThresholds
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing nil map for threshold",
|
||||
input: nil,
|
||||
errInfo: fmt.Errorf("no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing no threshold",
|
||||
input: api.ResourceThresholds{},
|
||||
errInfo: fmt.Errorf("no resource threshold is configured"),
|
||||
},
|
||||
{
|
||||
name: "passing extended resource name other than cpu/memory/pods",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
extendedResource: 50,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing invalid resource value",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 110,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
errInfo: fmt.Errorf("%v threshold not in [%v, %v] range", v1.ResourceCPU, MinResourcePercentage, MaxResourcePercentage),
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with max and min resource value",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 100,
|
||||
v1.ResourceMemory: 0,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with only cpu",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with cpu, memory and pods",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 30,
|
||||
v1.ResourcePods: 40,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with only extended resource",
|
||||
input: api.ResourceThresholds{
|
||||
extendedResource: 80,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing a valid threshold with cpu, memory, pods and extended resource",
|
||||
input: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 30,
|
||||
v1.ResourcePods: 40,
|
||||
extendedResource: 50,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
validateErr := validateThresholds(test.input)
|
||||
|
||||
if validateErr == nil || test.errInfo == nil {
|
||||
if validateErr != test.errInfo {
|
||||
t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != test.errInfo.Error() {
|
||||
t.Errorf("expected validity of threshold: %#v to be %v but got %v instead", test.input, test.errInfo, validateErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceUsagePercentages(t *testing.T) {
|
||||
resourceUsagePercentage := resourceUsagePercentages(NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
|
||||
},
|
||||
})
|
||||
|
||||
expectedUsageInIntPercentage := map[v1.ResourceName]float64{
|
||||
v1.ResourceCPU: 63,
|
||||
v1.ResourceMemory: 90,
|
||||
v1.ResourcePods: 37,
|
||||
}
|
||||
|
||||
for resourceName, percentage := range expectedUsageInIntPercentage {
|
||||
if math.Floor(resourceUsagePercentage[resourceName]) != percentage {
|
||||
t.Errorf("Incorrect percentage computation, expected %v, got math.Floor(%v) instead", percentage, resourceUsagePercentage[resourceName])
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
|
||||
}
|
||||
@@ -55,9 +55,13 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
}

var includedNamespaces, excludedNamespaces []string
if strategy.Params != nil && strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
var labelSelector *metav1.LabelSelector
if strategy.Params != nil {
if strategy.Params.Namespaces != nil {
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
labelSelector = strategy.Params.LabelSelector
}

thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
@@ -66,7 +70,12 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
return
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
nodeFit := false
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
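// As far as one can tell from WithNodeFit's name and the accompanying test changes,
// enabling nodeFit makes the evictable filter also check that some other suitable node
// could host the pod (node selectors, taints, schedulability) before evicting it for an
// anti-affinity violation; the tests below add a differently labeled node and an
// unschedulable node to exercise exactly that.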
|
||||
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
@@ -76,6 +85,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
|
||||
node,
|
||||
podutil.WithNamespaces(includedNamespaces),
|
||||
podutil.WithoutNamespaces(excludedNamespaces),
|
||||
podutil.WithLabelSelector(labelSelector),
|
||||
)
|
||||
if err != nil {
|
||||
return
|
||||
|
||||
@@ -20,7 +20,8 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@@ -33,16 +34,30 @@ import (
|
||||
|
||||
func TestPodAntiAffinity(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"datacenter": "east",
|
||||
}
|
||||
})
|
||||
|
||||
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec = v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
|
||||
|
||||
criticalPriority := utils.SystemCriticalPriority
|
||||
nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node.Name, func(pod *v1.Pod) {
|
||||
nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node1.Name, func(pod *v1.Pod) {
|
||||
pod.Spec.Priority = &criticalPriority
|
||||
})
|
||||
p2.Labels = map[string]string{"foo": "bar"}
|
||||
@@ -71,36 +86,70 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
test.SetPodPriority(p6, 50)
|
||||
test.SetPodPriority(p7, 0)
|
||||
|
||||
// Set pod node selectors
|
||||
p8.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
maxPodsToEvictPerNode int
|
||||
pods []v1.Pod
|
||||
expectedEvictedPodCount int
|
||||
nodeFit bool
|
||||
nodes []*v1.Node
|
||||
}{
|
||||
{
|
||||
description: "Maximum pods to evict - 0",
|
||||
maxPodsToEvictPerNode: 0,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 3,
|
||||
},
|
||||
{
|
||||
description: "Maximum pods to evict - 3",
|
||||
maxPodsToEvictPerNode: 3,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 3,
|
||||
},
|
||||
{
|
||||
description: "Evict only 1 pod after sorting",
|
||||
maxPodsToEvictPerNode: 0,
|
||||
pods: []v1.Pod{*p5, *p6, *p7},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
|
||||
maxPodsToEvictPerNode: 1,
|
||||
pods: []v1.Pod{*p1, *nonEvictablePod},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
|
||||
maxPodsToEvictPerNode: 1,
|
||||
pods: []v1.Pod{*p1, *nonEvictablePod},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Won't evict pods because node selectors don't match available nodes",
|
||||
maxPodsToEvictPerNode: 1,
|
||||
pods: []v1.Pod{*p8, *nonEvictablePod},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 0,
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "Won't evict pods because only other node is not schedulable",
|
||||
maxPodsToEvictPerNode: 1,
|
||||
pods: []v1.Pod{*p8, *nonEvictablePod},
|
||||
nodes: []*v1.Node{node1, node3},
|
||||
expectedEvictedPodCount: 0,
|
||||
nodeFit: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
@@ -110,19 +159,26 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
return true, &v1.PodList{Items: test.pods}, nil
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, node, nil
|
||||
return true, node1, nil
|
||||
})
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
policyv1.SchemeGroupVersion.String(),
|
||||
false,
|
||||
test.maxPodsToEvictPerNode,
|
||||
[]*v1.Node{node},
|
||||
test.nodes,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: test.nodeFit,
|
||||
},
|
||||
}
|
||||
|
||||
RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, podEvictor)
|
||||
RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != test.expectedEvictedPodCount {
|
||||
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
@@ -91,7 +91,7 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
|
||||
pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
|
||||
pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, strategy.Params.LabelSelector, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
|
||||
for _, pod := range pods {
|
||||
success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
|
||||
if success {
|
||||
@@ -107,7 +107,15 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
|
||||
}
|
||||
}
|
||||
|
||||
func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, includedNamespaces, excludedNamespaces []string, maxPodLifeTimeSeconds uint, filter func(pod *v1.Pod) bool) []*v1.Pod {
|
||||
func listOldPodsOnNode(
|
||||
ctx context.Context,
|
||||
client clientset.Interface,
|
||||
node *v1.Node,
|
||||
includedNamespaces, excludedNamespaces []string,
|
||||
labelSelector *metav1.LabelSelector,
|
||||
maxPodLifeTimeSeconds uint,
|
||||
filter func(pod *v1.Pod) bool,
|
||||
) []*v1.Pod {
|
||||
pods, err := podutil.ListPodsOnANode(
|
||||
ctx,
|
||||
client,
|
||||
@@ -115,6 +123,7 @@ func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1
|
||||
podutil.WithFilter(filter),
|
||||
podutil.WithNamespaces(includedNamespaces),
|
||||
podutil.WithoutNamespaces(excludedNamespaces),
|
||||
podutil.WithLabelSelector(labelSelector),
|
||||
)
|
||||
if err != nil {
|
||||
return nil
|
||||
@@ -122,7 +131,7 @@ func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1
|
||||
|
||||
var oldPods []*v1.Pod
|
||||
for _, pod := range pods {
|
||||
podAgeSeconds := uint(v1meta.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
|
||||
podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
|
||||
if podAgeSeconds > maxPodLifeTimeSeconds {
|
||||
oldPods = append(oldPods, pod)
|
||||
}
|
||||
|
||||
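The refactored listOldPodsOnNode above threads strategy.Params.LabelSelector into ListPodsOnANode via podutil.WithLabelSelector. For orientation, a sketch of how such a selector is typically evaluated against a pod's labels, assuming the standard apimachinery helpers metav1.LabelSelectorAsSelector and k8s.io/apimachinery/pkg/labels (the option's internals are not shown in this diff):

// Sketch only: turning a *metav1.LabelSelector into a concrete match.
selector, err := metav1.LabelSelectorAsSelector(strategy.Params.LabelSelector)
if err == nil && selector.Matches(labels.Set(pod.Labels)) {
	// the pod passes the label filter and remains subject to the age check above
}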
@@ -22,6 +22,7 @@ import (
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@@ -33,15 +34,15 @@ import (
|
||||
|
||||
func TestPodLifeTime(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
|
||||
newerPodCreationTime := metav1.NewTime(time.Now())
|
||||
|
||||
// Setup pods, one should be evicted
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p1.Namespace = "dev"
|
||||
p1.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
p2.Namespace = "dev"
|
||||
p2.ObjectMeta.CreationTimestamp = olderPodCreationTime
|
||||
|
||||
@@ -50,10 +51,10 @@ func TestPodLifeTime(t *testing.T) {
|
||||
p2.ObjectMeta.OwnerReferences = ownerRef1
|
||||
|
||||
// Setup pods, zero should be evicted
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||
p3.Namespace = "dev"
|
||||
p3.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
|
||||
p4.Namespace = "dev"
|
||||
p4.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
|
||||
@@ -62,10 +63,10 @@ func TestPodLifeTime(t *testing.T) {
|
||||
p4.ObjectMeta.OwnerReferences = ownerRef2
|
||||
|
||||
// Setup pods, one should be evicted
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
|
||||
p5.Namespace = "dev"
|
||||
p5.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
|
||||
p6.Namespace = "dev"
|
||||
p6.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(time.Second * 605))
|
||||
|
||||
@@ -74,10 +75,10 @@ func TestPodLifeTime(t *testing.T) {
|
||||
p6.ObjectMeta.OwnerReferences = ownerRef3
|
||||
|
||||
// Setup pods, zero should be evicted
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
|
||||
p7.Namespace = "dev"
|
||||
p7.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node.Name, nil)
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
|
||||
p8.Namespace = "dev"
|
||||
p8.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(time.Second * 595))
|
||||
|
||||
@@ -86,10 +87,10 @@ func TestPodLifeTime(t *testing.T) {
|
||||
p6.ObjectMeta.OwnerReferences = ownerRef4
|
||||
|
||||
// Setup two old pods with different status phases
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node.Name, nil)
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
|
||||
p9.Namespace = "dev"
|
||||
p9.ObjectMeta.CreationTimestamp = olderPodCreationTime
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
|
||||
p10.Namespace = "dev"
|
||||
p10.ObjectMeta.CreationTimestamp = olderPodCreationTime
|
||||
|
||||
@@ -98,13 +99,41 @@ func TestPodLifeTime(t *testing.T) {
|
||||
p9.ObjectMeta.OwnerReferences = ownerRef1
|
||||
p10.ObjectMeta.OwnerReferences = ownerRef1
|
||||
|
||||
p11 := test.BuildTestPod("p11", 100, 0, node1.Name, func(pod *v1.Pod) {
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "pvc", VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod.Namespace = "dev"
|
||||
pod.ObjectMeta.CreationTimestamp = olderPodCreationTime
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
})
|
||||
|
||||
// Setup two old pods with different labels
|
||||
p12 := test.BuildTestPod("p12", 100, 0, node1.Name, nil)
|
||||
p12.Namespace = "dev"
|
||||
p12.ObjectMeta.CreationTimestamp = olderPodCreationTime
|
||||
p13 := test.BuildTestPod("p13", 100, 0, node1.Name, nil)
|
||||
p13.Namespace = "dev"
|
||||
p13.ObjectMeta.CreationTimestamp = olderPodCreationTime
|
||||
|
||||
p12.ObjectMeta.Labels = map[string]string{"foo": "bar"}
|
||||
p13.ObjectMeta.Labels = map[string]string{"foo": "bar1"}
|
||||
p12.ObjectMeta.OwnerReferences = ownerRef1
|
||||
p13.ObjectMeta.OwnerReferences = ownerRef1
|
||||
|
||||
var maxLifeTime uint = 600
|
||||
testCases := []struct {
|
||||
description string
|
||||
strategy api.DeschedulerStrategy
|
||||
maxPodsToEvictPerNode int
|
||||
pods []v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedEvictedPodCount int
|
||||
ignorePvcPods bool
|
||||
}{
|
||||
{
|
||||
description: "Two pods in the `dev` Namespace, 1 is new and 1 very is old. 1 should be evicted.",
|
||||
@@ -116,6 +145,7 @@ func TestPodLifeTime(t *testing.T) {
|
||||
},
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p1, *p2},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
@@ -128,6 +158,7 @@ func TestPodLifeTime(t *testing.T) {
|
||||
},
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p3, *p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
{
|
||||
@@ -140,6 +171,7 @@ func TestPodLifeTime(t *testing.T) {
|
||||
},
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p5, *p6},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
@@ -152,6 +184,7 @@ func TestPodLifeTime(t *testing.T) {
|
||||
},
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p7, *p8},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
{
|
||||
@@ -167,28 +200,73 @@ func TestPodLifeTime(t *testing.T) {
|
||||
},
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p9, *p10},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "does not evict pvc pods with ignorePvcPods set to true",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p11},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 0,
|
||||
ignorePvcPods: true,
|
||||
},
|
||||
{
|
||||
description: "evicts pvc pods with ignorePvcPods set to false (or unset)",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p11},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Two old pods with different labels, 1 selected by labelSelector",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
maxPodsToEvictPerNode: 5,
|
||||
pods: []v1.Pod{*p12, *p13},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
fakeClient := &fake.Clientset{}
|
||||
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: tc.pods}, nil
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, node, nil
|
||||
})
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
policyv1.SchemeGroupVersion.String(),
|
||||
false,
|
||||
tc.maxPodsToEvictPerNode,
|
||||
[]*v1.Node{node},
|
||||
tc.nodes,
|
||||
false,
|
||||
false,
|
||||
tc.ignorePvcPods,
|
||||
)
|
||||
|
||||
PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)
|
||||
PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
|
||||
|
||||
@@ -67,7 +67,12 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
|
||||
excludedNamespaces = strategy.Params.Namespaces.Exclude
|
||||
}
|
||||
|
||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
|
||||
nodeFit := false
|
||||
if strategy.Params != nil {
|
||||
nodeFit = strategy.Params.NodeFit
|
||||
}
|
||||
|
||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
|
||||
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
@@ -78,6 +83,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
|
||||
podutil.WithFilter(evictable.IsEvictable),
|
||||
podutil.WithNamespaces(includedNamespaces),
|
||||
podutil.WithoutNamespaces(excludedNamespaces),
|
||||
podutil.WithLabelSelector(strategy.Params.LabelSelector),
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
|
||||
|
||||
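The thresholds in the test cases below (multiples of 20 and 25 restarts) suggest each fixture pod accrues 20 restarts across its regular containers plus 5 across its init containers. A sketch of the kind of total the strategy presumably compares against podRestartThreshold (the actual helper in this repository may differ):

// Sketch only: total restart count for a pod, optionally including init containers.
func totalRestarts(pod *v1.Pod, includingInitContainers bool) int32 {
	var sum int32
	for _, cs := range pod.Status.ContainerStatuses {
		sum += cs.RestartCount
	}
	if includingInitContainers {
		for _, cs := range pod.Status.InitContainerStatuses {
			sum += cs.RestartCount
		}
	}
	return sum
}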
@@ -22,7 +22,8 @@ import (
|
||||
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@@ -81,7 +82,26 @@ func initPods(node *v1.Node) []v1.Pod {
|
||||
|
||||
func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32) api.DeschedulerStrategy {
|
||||
|
||||
node1 := test.BuildTestNode("node1", 2000, 3000, 10, nil)
|
||||
node2 := test.BuildTestNode("node2", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "hardware",
|
||||
Value: "gpu",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
})
|
||||
node3 := test.BuildTestNode("node3", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec = v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
|
||||
pods := initPods(node1)
|
||||
|
||||
createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32, nodeFit bool) api.DeschedulerStrategy {
|
||||
return api.DeschedulerStrategy{
|
||||
Enabled: enabled,
|
||||
Params: &api.StrategyParameters{
|
||||
@@ -89,76 +109,98 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
PodRestartThreshold: restartThresholds,
|
||||
IncludingInitContainers: includingInitContainers,
|
||||
},
|
||||
NodeFit: nodeFit,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
pods []v1.Pod
|
||||
nodes []*v1.Node
|
||||
strategy api.DeschedulerStrategy
|
||||
expectedEvictedPodCount int
|
||||
maxPodsToEvictPerNode int
|
||||
}{
|
||||
{
|
||||
description: "All pods have total restarts under threshold, no pod evictions",
|
||||
strategy: createStrategy(true, true, 10000),
|
||||
strategy: createStrategy(true, true, 10000, false),
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Some pods have total restarts bigger than threshold",
|
||||
strategy: createStrategy(true, true, 1),
|
||||
strategy: createStrategy(true, true, 1, false),
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
|
||||
strategy: createStrategy(true, true, 1*25),
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
|
||||
strategy: createStrategy(true, true, 1*25, false),
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pods evictions",
|
||||
strategy: createStrategy(true, false, 1*25),
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
|
||||
strategy: createStrategy(true, false, 1*25, false),
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 5,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
|
||||
strategy: createStrategy(true, true, 1*20),
|
||||
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
|
||||
strategy: createStrategy(true, true, 1*20, false),
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pods evictions",
|
||||
strategy: createStrategy(true, false, 1*20),
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
|
||||
strategy: createStrategy(true, false, 1*20, false),
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
|
||||
strategy: createStrategy(true, true, 5*25+1),
|
||||
strategy: createStrategy(true, true, 5*25+1, false),
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
|
||||
strategy: createStrategy(true, false, 5*20+1),
|
||||
strategy: createStrategy(true, false, 5*20+1, false),
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
maxPodsToEvictPerNode: 0,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pods evictions",
|
||||
strategy: createStrategy(true, true, 1),
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
|
||||
strategy: createStrategy(true, true, 1, false),
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvictPerNode: 3,
|
||||
},
|
||||
{
|
||||
description:             "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
|
||||
strategy: createStrategy(true, true, 1, true),
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: 3,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
|
||||
strategy: createStrategy(true, true, 1, true),
|
||||
nodes: []*v1.Node{node1, node3},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
|
||||
pods := initPods(node)
|
||||
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
@@ -167,14 +209,16 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
policyv1.SchemeGroupVersion.String(),
|
||||
false,
|
||||
tc.maxPodsToEvictPerNode,
|
||||
[]*v1.Node{node},
|
||||
tc.nodes,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
|
||||
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, podEvictor)
|
||||
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||
|
||||
@@ -22,6 +22,8 @@ import (
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -45,28 +47,51 @@ type topology struct {
|
||||
pods []*v1.Pod
|
||||
}
|
||||
|
||||
func validateAndParseTopologySpreadParams(ctx context.Context, client clientset.Interface, params *api.StrategyParameters) (int32, sets.String, sets.String, error) {
|
||||
// topologySpreadStrategyParams contains validated strategy parameters
|
||||
type topologySpreadStrategyParams struct {
|
||||
thresholdPriority int32
|
||||
includedNamespaces sets.String
|
||||
excludedNamespaces sets.String
|
||||
labelSelector labels.Selector
|
||||
nodeFit bool
|
||||
}
|
||||
|
||||
// validateAndParseTopologySpreadParams will validate parameters to ensure that they do not contain invalid values.
|
||||
func validateAndParseTopologySpreadParams(ctx context.Context, client clientset.Interface, params *api.StrategyParameters) (*topologySpreadStrategyParams, error) {
|
||||
var includedNamespaces, excludedNamespaces sets.String
|
||||
if params == nil {
|
||||
return 0, includedNamespaces, excludedNamespaces, nil
|
||||
return &topologySpreadStrategyParams{includedNamespaces: includedNamespaces, excludedNamespaces: excludedNamespaces}, nil
|
||||
}
|
||||
// At most one of include/exclude can be set
|
||||
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
|
||||
return 0, includedNamespaces, excludedNamespaces, fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||
return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||
}
|
||||
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
|
||||
return 0, includedNamespaces, excludedNamespaces, fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
|
||||
return nil, fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
|
||||
}
|
||||
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
|
||||
if err != nil {
|
||||
return 0, includedNamespaces, excludedNamespaces, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
|
||||
return nil, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
|
||||
}
|
||||
if params.Namespaces != nil {
|
||||
includedNamespaces = sets.NewString(params.Namespaces.Include...)
|
||||
excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
|
||||
}
|
||||
var selector labels.Selector
|
||||
if params.LabelSelector != nil {
|
||||
selector, err = metav1.LabelSelectorAsSelector(params.LabelSelector)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return thresholdPriority, includedNamespaces, excludedNamespaces, nil
|
||||
return &topologySpreadStrategyParams{
|
||||
thresholdPriority: thresholdPriority,
|
||||
includedNamespaces: includedNamespaces,
|
||||
excludedNamespaces: excludedNamespaces,
|
||||
labelSelector: selector,
|
||||
nodeFit: params.NodeFit,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func RemovePodsViolatingTopologySpreadConstraint(
|
||||
@@ -76,17 +101,22 @@ func RemovePodsViolatingTopologySpreadConstraint(
|
||||
nodes []*v1.Node,
|
||||
podEvictor *evictions.PodEvictor,
|
||||
) {
|
||||
thresholdPriority, includedNamespaces, excludedNamespaces, err := validateAndParseTopologySpreadParams(ctx, client, strategy.Params)
|
||||
strategyParams, err := validateAndParseTopologySpreadParams(ctx, client, strategy.Params)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Invalid PodLifeTime parameters")
|
||||
klog.ErrorS(err, "Invalid RemovePodsViolatingTopologySpreadConstraint parameters")
|
||||
return
|
||||
}
|
||||
|
||||
evictable := podEvictor.Evictable(
|
||||
evictions.WithPriorityThreshold(strategyParams.thresholdPriority),
|
||||
evictions.WithNodeFit(strategyParams.nodeFit),
|
||||
evictions.WithLabelSelector(strategyParams.labelSelector),
|
||||
)
|
||||
|
||||
nodeMap := make(map[string]*v1.Node, len(nodes))
|
||||
for _, node := range nodes {
|
||||
nodeMap[node.Name] = node
|
||||
}
|
||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
|
||||
|
||||
// 1. for each namespace for which there is Topology Constraint
|
||||
// 2. for each TopologySpreadConstraint in that namespace
|
||||
@@ -110,8 +140,8 @@ func RemovePodsViolatingTopologySpreadConstraint(
|
||||
podsForEviction := make(map[*v1.Pod]struct{})
|
||||
// 1. for each namespace...
|
||||
for _, namespace := range namespaces.Items {
|
||||
if (len(includedNamespaces) > 0 && !includedNamespaces.Has(namespace.Name)) ||
|
||||
(len(excludedNamespaces) > 0 && excludedNamespaces.Has(namespace.Name)) {
|
||||
if (len(strategyParams.includedNamespaces) > 0 && !strategyParams.includedNamespaces.Has(namespace.Name)) ||
|
||||
(len(strategyParams.excludedNamespaces) > 0 && strategyParams.excludedNamespaces.Has(namespace.Name)) {
|
||||
continue
|
||||
}
|
||||
namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
|
||||
@@ -121,13 +151,11 @@ func RemovePodsViolatingTopologySpreadConstraint(
|
||||
}
|
||||
|
||||
// ...where there is a topology constraint
|
||||
//namespaceTopologySpreadConstrainPods := make([]v1.Pod, 0, len(namespacePods.Items))
|
||||
namespaceTopologySpreadConstraints := make(map[v1.TopologySpreadConstraint]struct{})
|
||||
for _, pod := range namespacePods.Items {
|
||||
for _, constraint := range pod.Spec.TopologySpreadConstraints {
|
||||
// Only deal with hard topology constraints
|
||||
// TODO(@damemi): add support for soft constraints
|
||||
if constraint.WhenUnsatisfiable != v1.DoNotSchedule {
|
||||
// Ignore soft topology constraints if they are not included
|
||||
if constraint.WhenUnsatisfiable == v1.ScheduleAnyway && (strategy.Params == nil || !strategy.Params.IncludeSoftConstraints) {
|
||||
continue
|
||||
}
|
||||
namespaceTopologySpreadConstraints[constraint] = struct{}{}
|
||||
@@ -188,7 +216,10 @@ func RemovePodsViolatingTopologySpreadConstraint(
|
||||
}
|
||||
|
||||
for pod := range podsForEviction {
|
||||
if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil && !evictable.IsEvictable(pod) {
|
||||
if !evictable.IsEvictable(pod) {
|
||||
continue
|
||||
}
|
||||
if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil {
|
||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
|
||||
break
|
||||
}
|
||||
@@ -241,6 +272,7 @@ func balanceDomains(
|
||||
sumPods float64,
|
||||
isEvictable func(*v1.Pod) bool,
|
||||
nodeMap map[string]*v1.Node) {
|
||||
|
||||
idealAvg := sumPods / float64(len(constraintTopologies))
|
||||
sortedDomains := sortDomains(constraintTopologies, isEvictable)
|
||||
// i is the index for belowOrEqualAvg
|
||||
@@ -282,19 +314,11 @@ func balanceDomains(
|
||||
// also (just for tracking), add them to the list of pods in the lower topology
|
||||
aboveToEvict := sortedDomains[j].pods[len(sortedDomains[j].pods)-movePods:]
|
||||
for k := range aboveToEvict {
|
||||
// if the pod has a hard nodeAffinity or nodeSelector that only matches this node,
|
||||
// don't bother evicting it as it will just end up back on the same node
|
||||
// however we still account for it "being evicted" so the algorithm can complete
|
||||
// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
|
||||
// but still try to get the required # of movePods (instead of just chopping that value off the slice above)
|
||||
if aboveToEvict[k].Spec.NodeSelector != nil ||
|
||||
(aboveToEvict[k].Spec.Affinity != nil &&
|
||||
aboveToEvict[k].Spec.Affinity.NodeAffinity != nil &&
|
||||
aboveToEvict[k].Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
|
||||
nodesPodFitsOnBesidesCurrent(aboveToEvict[k], nodeMap) == 0) {
|
||||
klog.V(2).InfoS("Ignoring pod for eviction due to node selector/affinity", "pod", klog.KObj(aboveToEvict[k]))
|
||||
if err := validatePodFitsOnOtherNodes(aboveToEvict[k], nodeMap); err != nil {
|
||||
klog.V(2).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(aboveToEvict[k]))
|
||||
continue
|
||||
}
|
||||
|
||||
podsForEviction[aboveToEvict[k]] = struct{}{}
|
||||
}
|
||||
sortedDomains[j].pods = sortedDomains[j].pods[:len(sortedDomains[j].pods)-movePods]
|
||||
@@ -302,18 +326,54 @@ func balanceDomains(
|
||||
}
|
||||
}
|
||||
|
||||
// nodesPodFitsOnBesidesCurrent counts the number of nodes this pod could fit on based on its affinity
|
||||
// validatePodFitsOnOtherNodes performs validation based on scheduling predicates for affinity and toleration.
|
||||
// It excludes the current node because, for the sake of domain balancing only, we care about if there is any other
|
||||
// place it could theoretically fit.
|
||||
// If the pod doesn't fit on its current node, that is a job for RemovePodsViolatingNodeAffinity, and irrelevant to Topology Spreading
|
||||
func nodesPodFitsOnBesidesCurrent(pod *v1.Pod, nodeMap map[string]*v1.Node) int {
|
||||
count := 0
|
||||
for _, node := range nodeMap {
|
||||
if nodeutil.PodFitsCurrentNode(pod, node) && node != nodeMap[pod.Spec.NodeName] {
|
||||
count++
|
||||
}
|
||||
func validatePodFitsOnOtherNodes(pod *v1.Pod, nodeMap map[string]*v1.Node) error {
|
||||
// if the pod has a hard nodeAffinity/nodeSelector/toleration that only matches this node,
|
||||
// don't bother evicting it as it will just end up back on the same node
|
||||
// however we still account for it "being evicted" so the algorithm can complete
|
||||
// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
|
||||
// but still try to get the required # of movePods (instead of just chopping that value off the slice above)
|
||||
isRequiredDuringSchedulingIgnoredDuringExecution := pod.Spec.Affinity != nil &&
|
||||
pod.Spec.Affinity.NodeAffinity != nil &&
|
||||
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil
|
||||
|
||||
hardTaintsFilter := func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
}
|
||||
return count
|
||||
|
||||
var eligibleNodesCount, ineligibleAffinityNodesCount, ineligibleTaintedNodesCount int
|
||||
for _, node := range nodeMap {
|
||||
if node == nodeMap[pod.Spec.NodeName] {
|
||||
continue
|
||||
}
|
||||
if pod.Spec.NodeSelector != nil || isRequiredDuringSchedulingIgnoredDuringExecution {
|
||||
if !nodeutil.PodFitsCurrentNode(pod, node) {
|
||||
ineligibleAffinityNodesCount++
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, hardTaintsFilter) {
|
||||
ineligibleTaintedNodesCount++
|
||||
continue
|
||||
}
|
||||
|
||||
eligibleNodesCount++
|
||||
}
|
||||
|
||||
if eligibleNodesCount == 0 {
|
||||
var errs []error
|
||||
if ineligibleAffinityNodesCount > 0 {
|
||||
errs = append(errs, fmt.Errorf("%d nodes with ineligible selector/affinity", ineligibleAffinityNodesCount))
|
||||
}
|
||||
if ineligibleTaintedNodesCount > 0 {
|
||||
errs = append(errs, fmt.Errorf("%d nodes with taints that are not tolerated", ineligibleTaintedNodesCount))
|
||||
}
|
||||
return utilerrors.NewAggregate(errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sortDomains sorts and splits the list of topology domains based on their size
|
||||
|
||||
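The test names in the file below encode the balancing arithmetic of balanceDomains: with domain sizes [0, 1, 100] the ideal average is 101/3, roughly 33.7, so 66 pods leave the largest domain to reach [34, 33, 34]. A rough sketch of that calculation with illustrative variable names (the real code additionally respects maxSkew and per-pod evictability, as the maxSkew=2 case shows):

// Sketch only: how many pods an above-average domain gives up.
idealAvg := sumPods / float64(len(domains))            // 101 / 3, about 33.67
movePods := int(float64(len(domain.pods)) - idealAvg)  // 100 - 33.67 truncates to 66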
@@ -3,9 +3,10 @@ package strategies
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"testing"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -57,8 +58,12 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: false,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2]",
|
||||
@@ -68,17 +73,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
},
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 2,
|
||||
@@ -92,11 +90,15 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: false,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2], exclude kube-system namespace",
|
||||
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2] (soft constraints)",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
@@ -110,7 +112,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
WhenUnsatisfiable: v1.ScheduleAnyway,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
},
|
||||
@@ -127,7 +129,70 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{Enabled: true, Params: &api.StrategyParameters{Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}}},
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [3,1], maxSkew=1, no pods eligible, move 0 pods",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
noOwners: true,
|
||||
},
|
||||
{
|
||||
count: 2,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
noOwners: true,
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
noOwners: true,
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: false,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2], exclude kube-system namespace",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 2,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{Enabled: true, Params: &api.StrategyParameters{NodeFit: true, Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}}},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
@@ -138,17 +203,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
},
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 4,
|
||||
@@ -162,8 +220,12 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: false,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods to achieve [2,2]",
|
||||
@@ -173,17 +235,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
},
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
@@ -192,8 +247,12 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 2,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: false,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [4,0], maxSkew=1, only move 1 pod since pods with nodeSelector and nodeAffinity aren't evicted",
|
||||
@@ -203,17 +262,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
},
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"zone": "zoneA"},
|
||||
},
|
||||
{
|
||||
@@ -239,8 +291,59 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels["zone"] = "zoneA"
|
||||
n.Labels["region"] = "boston"
|
||||
}),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels["zone"] = "zoneB"
|
||||
n.Labels["region"] = "boston"
|
||||
}),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"region": "boston"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
nodeSelector: map[string]string{"region": "boston"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
nodeSelector: map[string]string{"region": "boston"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
nodeSelector: map[string]string{"region": "boston"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 2,
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: false,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
|
||||
@@ -251,17 +354,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
},
|
||||
count: 1,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 100,
|
||||
@@ -283,17 +379,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
},
|
||||
count: 1,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
@@ -307,8 +396,12 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 3,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: false,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains size [2 6], maxSkew=2, should move 1 to get [3 5]",
|
||||
@@ -316,6 +409,47 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(2),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
{
|
||||
count: 6,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: false,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains size [2 6], maxSkew=2, can't move any because of node taints",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels["zone"] = "zoneA"
|
||||
n.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "hardware",
|
||||
Value: "gpu",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
@@ -341,10 +475,307 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
// see https://github.com/kubernetes-sigs/descheduler/issues/564
|
||||
name: "Multiple constraints (6 nodes/2 zones, 4 pods)",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels = map[string]string{"topology.kubernetes.io/zone": "zoneA", "kubernetes.io/hostname": "n1"}
|
||||
}),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels = map[string]string{"topology.kubernetes.io/zone": "zoneA", "kubernetes.io/hostname": "n2"}
|
||||
}),
|
||||
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels = map[string]string{"topology.kubernetes.io/zone": "zoneA", "kubernetes.io/hostname": "n3"}
|
||||
}),
|
||||
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels = map[string]string{"topology.kubernetes.io/zone": "zoneB", "kubernetes.io/hostname": "n4"}
|
||||
}),
|
||||
test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels = map[string]string{"topology.kubernetes.io/zone": "zoneB", "kubernetes.io/hostname": "n5"}
|
||||
}),
|
||||
test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels = map[string]string{"topology.kubernetes.io/zone": "zoneB", "kubernetes.io/hostname": "n6"}
|
||||
}),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"app": "whoami"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "whoami"}},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "topology.kubernetes.io/zone",
|
||||
WhenUnsatisfiable: v1.ScheduleAnyway,
|
||||
},
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "whoami"}},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n2",
|
||||
labels: map[string]string{"app": "whoami"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "whoami"}},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "topology.kubernetes.io/zone",
|
||||
WhenUnsatisfiable: v1.ScheduleAnyway,
|
||||
},
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "whoami"}},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n3",
|
||||
labels: map[string]string{"app": "whoami"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "whoami"}},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "topology.kubernetes.io/zone",
|
||||
WhenUnsatisfiable: v1.ScheduleAnyway,
|
||||
},
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "whoami"}},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n4",
|
||||
labels: map[string]string{"app": "whoami"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "whoami"}},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "topology.kubernetes.io/zone",
|
||||
WhenUnsatisfiable: v1.ScheduleAnyway,
|
||||
},
|
||||
{
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "whoami"}},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod since pod tolerates the node with taint",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels["zone"] = "zoneB"
|
||||
n.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "taint-test",
|
||||
Value: "test",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
tolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "taint-test",
|
||||
Value: "test",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
nodeSelector: map[string]string{"zone": "zoneA"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels["zone"] = "zoneB"
|
||||
n.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "taint-test",
|
||||
Value: "test",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
nodeSelector: map[string]string{"zone": "zoneA"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with PreferNoSchedule Taint",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels["zone"] = "zoneB"
|
||||
n.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "taint-test",
|
||||
Value: "test",
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
nodeSelector: map[string]string{"zone": "zoneA"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pod for node with unmatched label filtering",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpIn),
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with matched label filtering (NotIn op)",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn),
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -368,6 +799,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
100,
|
||||
tc.nodes,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
)
|
||||
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
@@ -385,6 +818,8 @@ type testPodList struct {
|
||||
constraints []v1.TopologySpreadConstraint
|
||||
nodeSelector map[string]string
|
||||
nodeAffinity *v1.Affinity
|
||||
noOwners bool
|
||||
tolerations []v1.Toleration
|
||||
}
|
||||
|
||||
func createTestPods(testPods []testPodList) []*v1.Pod {
|
||||
@@ -398,13 +833,33 @@ func createTestPods(testPods []testPodList) []*v1.Pod {
|
||||
p.Labels = make(map[string]string)
|
||||
p.Labels = tp.labels
|
||||
p.Namespace = "ns1"
|
||||
p.ObjectMeta.OwnerReferences = ownerRef1
|
||||
if !tp.noOwners {
|
||||
p.ObjectMeta.OwnerReferences = ownerRef1
|
||||
}
|
||||
p.Spec.TopologySpreadConstraints = tp.constraints
|
||||
p.Spec.NodeSelector = tp.nodeSelector
|
||||
p.Spec.Affinity = tp.nodeAffinity
|
||||
p.Spec.Tolerations = tp.tolerations
|
||||
}))
|
||||
podNum++
|
||||
}
|
||||
}
|
||||
return pods
|
||||
}
|
||||
|
||||
func getLabelSelector(key string, values []string, operator metav1.LabelSelectorOperator) *metav1.LabelSelector {
|
||||
return &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{{Key: key, Operator: operator, Values: values}},
|
||||
}
|
||||
}
|
||||
|
||||
func getDefaultTopologyConstraints(maxSkew int32) []v1.TopologySpreadConstraint {
|
||||
return []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: maxSkew,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
}
|
||||
}
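// Illustrative sketch only (not part of the original test file): how the
// helpers above combine into a test fixture. It assumes test.BuildTestPod from
// the descheduler test package with the signature
// BuildTestPod(name string, cpuMilli, memory int64, nodeName string, apply func(*v1.Pod)).
func exampleSoftConstraintFixture() (*v1.Pod, api.DeschedulerStrategy) {
	// A pod labeled foo=bar carrying the default hard "zone" constraint.
	pod := test.BuildTestPod("p1", 100, 0, "n1", func(p *v1.Pod) {
		p.Labels = map[string]string{"foo": "bar"}
		p.Spec.TopologySpreadConstraints = getDefaultTopologyConstraints(1)
	})
	// A strategy that also evaluates ScheduleAnyway (soft) constraints and
	// only considers pods whose labels match the selector.
	strategy := api.DeschedulerStrategy{
		Params: &api.StrategyParameters{
			IncludeSoftConstraints: true,
			LabelSelector:          getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
		},
	}
	return pod, strategy
}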
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
@@ -82,7 +83,7 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
|
||||
return requestQuantity
|
||||
}
|
||||
|
||||
// IsMirrorPod returns true if the passed Pod is a Mirror Pod.
|
||||
// IsMirrorPod returns true if the pod is a Mirror Pod.
|
||||
func IsMirrorPod(pod *v1.Pod) bool {
|
||||
_, ok := pod.Annotations[v1.MirrorPodAnnotationKey]
|
||||
return ok
|
||||
@@ -94,6 +95,42 @@ func IsStaticPod(pod *v1.Pod) bool {
|
||||
return err == nil && source != "api"
|
||||
}
|
||||
|
||||
// IsCriticalPriorityPod returns true if the pod has critical priority.
|
||||
func IsCriticalPriorityPod(pod *v1.Pod) bool {
|
||||
return pod.Spec.Priority != nil && *pod.Spec.Priority >= SystemCriticalPriority
|
||||
}
|
||||
|
||||
// IsDaemonsetPod returns true if the pod is owned by a DaemonSet.
|
||||
func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool {
|
||||
for _, ownerRef := range ownerRefList {
|
||||
if ownerRef.Kind == "DaemonSet" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsPodWithLocalStorage returns true if the pod has local storage.
|
||||
func IsPodWithLocalStorage(pod *v1.Pod) bool {
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if volume.HostPath != nil || volume.EmptyDir != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsPodWithPVC returns true if the pod has claimed a persistent volume.
|
||||
func IsPodWithPVC(pod *v1.Pod) bool {
|
||||
for _, volume := range pod.Spec.Volumes {
|
||||
if volume.PersistentVolumeClaim != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
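// Illustrative usage of the two volume predicates above (a sketch, not part of
// this change): a pod with an EmptyDir and a PVC volume satisfies both checks.
func examplePodVolumePredicates() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{Name: "cache", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
				{Name: "data", VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "data-pvc"}}},
			},
		},
	}
	fmt.Println(IsPodWithLocalStorage(pod)) // true: EmptyDir counts as local storage
	fmt.Println(IsPodWithPVC(pod))          // true: the pod claims a persistent volume
}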
|
||||
|
||||
// GetPodSource returns the source of the pod based on the annotation.
|
||||
func GetPodSource(pod *v1.Pod) (string, error) {
|
||||
if pod.Annotations != nil {
|
||||
@@ -104,23 +141,6 @@ func GetPodSource(pod *v1.Pod) (string, error) {
|
||||
return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
|
||||
}
|
||||
|
||||
// IsCriticalPod returns true if the pod is a static pod, a mirror pod, or has system-critical priority.
|
||||
func IsCriticalPod(pod *v1.Pod) bool {
|
||||
if IsStaticPod(pod) {
|
||||
return true
|
||||
}
|
||||
|
||||
if IsMirrorPod(pod) {
|
||||
return true
|
||||
}
|
||||
|
||||
if pod.Spec.Priority != nil && *pod.Spec.Priority >= SystemCriticalPriority {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
|
||||
// containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
|
||||
// total container resource requests and to the total container limits which have a
|
||||
|
||||
@@ -18,6 +18,8 @@ package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -80,6 +82,116 @@ func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func uniqueSortNodeSelectorTerms(srcTerms []v1.NodeSelectorTerm) []v1.NodeSelectorTerm {
|
||||
terms := append([]v1.NodeSelectorTerm{}, srcTerms...)
|
||||
for i := range terms {
|
||||
if terms[i].MatchExpressions != nil {
|
||||
terms[i].MatchExpressions = uniqueSortNodeSelectorRequirements(terms[i].MatchExpressions)
|
||||
}
|
||||
if terms[i].MatchFields != nil {
|
||||
terms[i].MatchFields = uniqueSortNodeSelectorRequirements(terms[i].MatchFields)
|
||||
}
|
||||
}
|
||||
|
||||
if len(terms) < 2 {
|
||||
return terms
|
||||
}
|
||||
|
||||
lastTerm := terms[0]
|
||||
uniqueTerms := append([]v1.NodeSelectorTerm{}, terms[0])
|
||||
|
||||
for _, term := range terms[1:] {
|
||||
if reflect.DeepEqual(term, lastTerm) {
|
||||
continue
|
||||
}
|
||||
lastTerm = term
|
||||
uniqueTerms = append(uniqueTerms, term)
|
||||
}
|
||||
|
||||
return uniqueTerms
|
||||
}
|
||||
|
||||
// sort NodeSelectorRequirement in (key, operator, values) order
|
||||
func uniqueSortNodeSelectorRequirements(srcReqs []v1.NodeSelectorRequirement) []v1.NodeSelectorRequirement {
|
||||
reqs := append([]v1.NodeSelectorRequirement{}, srcReqs...)
|
||||
|
||||
// unique sort Values
|
||||
for i := range reqs {
|
||||
sort.Strings(reqs[i].Values)
|
||||
if len(reqs[i].Values) > 1 {
|
||||
lastString := reqs[i].Values[0]
|
||||
values := []string{lastString}
|
||||
for _, val := range reqs[i].Values {
|
||||
if val == lastString {
|
||||
continue
|
||||
}
|
||||
lastString = val
|
||||
values = append(values, val)
|
||||
}
|
||||
reqs[i].Values = values
|
||||
}
|
||||
}
|
||||
|
||||
if len(reqs) < 2 {
|
||||
return reqs
|
||||
}
|
||||
|
||||
// unique sort reqs
|
||||
sort.Slice(reqs, func(i, j int) bool {
|
||||
if reqs[i].Key < reqs[j].Key {
|
||||
return true
|
||||
}
|
||||
if reqs[i].Key > reqs[j].Key {
|
||||
return false
|
||||
}
|
||||
if reqs[i].Operator < reqs[j].Operator {
|
||||
return true
|
||||
}
|
||||
if reqs[i].Operator > reqs[j].Operator {
|
||||
return false
|
||||
}
|
||||
if len(reqs[i].Values) < len(reqs[j].Values) {
|
||||
return true
|
||||
}
|
||||
if len(reqs[i].Values) > len(reqs[j].Values) {
|
||||
return false
|
||||
}
|
||||
for k := range reqs[i].Values {
|
||||
if reqs[i].Values[k] < reqs[j].Values[k] {
|
||||
return true
|
||||
}
|
||||
if reqs[i].Values[k] > reqs[j].Values[k] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
lastReq := reqs[0]
|
||||
uniqueReqs := append([]v1.NodeSelectorRequirement{}, lastReq)
|
||||
for _, req := range reqs[1:] {
|
||||
if reflect.DeepEqual(req, lastReq) {
|
||||
continue
|
||||
}
|
||||
lastReq = req
|
||||
uniqueReqs = append(uniqueReqs, req)
|
||||
}
|
||||
return uniqueReqs
|
||||
}
|
||||
|
||||
func NodeSelectorsEqual(n1, n2 *v1.NodeSelector) bool {
|
||||
if n1 == nil && n2 == nil {
|
||||
return true
|
||||
}
|
||||
if n1 == nil || n2 == nil {
|
||||
return false
|
||||
}
|
||||
return reflect.DeepEqual(
|
||||
uniqueSortNodeSelectorTerms(n1.NodeSelectorTerms),
|
||||
uniqueSortNodeSelectorTerms(n2.NodeSelectorTerms),
|
||||
)
|
||||
}
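// Illustrative only (not part of this change): NodeSelectorsEqual ignores
// ordering and duplicate values because both selectors are unique-sorted
// before the deep comparison.
func exampleNodeSelectorsEqual() bool {
	left := &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{{
		MatchExpressions: []v1.NodeSelectorRequirement{
			{Key: "zone", Operator: v1.NodeSelectorOpIn, Values: []string{"b", "a", "a"}},
		},
	}}}
	right := &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{{
		MatchExpressions: []v1.NodeSelectorRequirement{
			{Key: "zone", Operator: v1.NodeSelectorOpIn, Values: []string{"a", "b"}},
		},
	}}}
	return NodeSelectorsEqual(left, right) // true
}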
|
||||
|
||||
// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations.
|
||||
func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool {
|
||||
for i := range tolerations {
|
||||
@@ -95,10 +207,6 @@ type taintsFilterFunc func(*v1.Taint) bool
|
||||
// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates
|
||||
// all the taints that apply to the filter in given taint list.
|
||||
func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v1.Taint, applyFilter taintsFilterFunc) bool {
|
||||
if len(taints) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for i := range taints {
|
||||
if applyFilter != nil && !applyFilter(&taints[i]) {
|
||||
continue
|
||||
@@ -111,3 +219,59 @@ func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// sort by (key, value, effect, operand)
|
||||
func uniqueSortTolerations(srcTolerations []v1.Toleration) []v1.Toleration {
|
||||
tolerations := append([]v1.Toleration{}, srcTolerations...)
|
||||
|
||||
if len(tolerations) < 2 {
|
||||
return tolerations
|
||||
}
|
||||
|
||||
sort.Slice(tolerations, func(i, j int) bool {
|
||||
if tolerations[i].Key < tolerations[j].Key {
|
||||
return true
|
||||
}
|
||||
if tolerations[i].Key > tolerations[j].Key {
|
||||
return false
|
||||
}
|
||||
if tolerations[i].Value < tolerations[j].Value {
|
||||
return true
|
||||
}
|
||||
if tolerations[i].Value > tolerations[j].Value {
|
||||
return false
|
||||
}
|
||||
if tolerations[i].Effect < tolerations[j].Effect {
|
||||
return true
|
||||
}
|
||||
if tolerations[i].Effect > tolerations[j].Effect {
|
||||
return false
|
||||
}
|
||||
return tolerations[i].Operator < tolerations[j].Operator
|
||||
})
|
||||
uniqueTolerations := []v1.Toleration{tolerations[0]}
|
||||
idx := 0
|
||||
for _, t := range tolerations[1:] {
|
||||
if t.MatchToleration(&uniqueTolerations[idx]) {
|
||||
continue
|
||||
}
|
||||
idx++
|
||||
uniqueTolerations = append(uniqueTolerations, t)
|
||||
}
|
||||
return uniqueTolerations
|
||||
}
|
||||
|
||||
func TolerationsEqual(t1, t2 []v1.Toleration) bool {
|
||||
t1Sorted := uniqueSortTolerations(t1)
|
||||
t2Sorted := uniqueSortTolerations(t2)
|
||||
l1Len := len(t1Sorted)
|
||||
if l1Len != len(t2Sorted) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < l1Len; i++ {
|
||||
if !t1Sorted[i].MatchToleration(&t2Sorted[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
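// Illustrative only (not part of this change): TolerationsEqual compares the
// unique-sorted lists, so ordering and exact duplicates do not affect the result.
func exampleTolerationsEqual() bool {
	left := []v1.Toleration{
		{Key: "b", Operator: v1.TolerationOpEqual, Value: "2", Effect: v1.TaintEffectNoSchedule},
		{Key: "a", Operator: v1.TolerationOpEqual, Value: "1", Effect: v1.TaintEffectNoSchedule},
		{Key: "a", Operator: v1.TolerationOpEqual, Value: "1", Effect: v1.TaintEffectNoSchedule},
	}
	right := []v1.Toleration{
		{Key: "a", Operator: v1.TolerationOpEqual, Value: "1", Effect: v1.TaintEffectNoSchedule},
		{Key: "b", Operator: v1.TolerationOpEqual, Value: "2", Effect: v1.TaintEffectNoSchedule},
	}
	return TolerationsEqual(left, right) // true: both reduce to [a=1, b=2]
}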
|
||||
|
||||
940  pkg/utils/predicates_test.go  Normal file
@@ -0,0 +1,940 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestUniqueSortTolerations(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
tolerations []v1.Toleration
|
||||
expectedTolerations []v1.Toleration
|
||||
}{
|
||||
{
|
||||
name: "sort by key",
|
||||
tolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key3",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value3",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
expectedTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key3",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value3",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sort by value",
|
||||
tolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value3",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
expectedTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value3",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sort by effect",
|
||||
tolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
},
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
},
|
||||
expectedTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
},
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sort unique",
|
||||
tolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
expectedTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
resultTolerations := uniqueSortTolerations(test.tolerations)
|
||||
if !reflect.DeepEqual(resultTolerations, test.expectedTolerations) {
|
||||
t.Errorf("tolerations not sorted as expected, \n\tgot: %#v, \n\texpected: %#v", resultTolerations, test.expectedTolerations)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTolerationsEqual(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
leftTolerations, rightTolerations []v1.Toleration
|
||||
equal bool
|
||||
}{
|
||||
{
|
||||
name: "identical lists",
|
||||
leftTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
rightTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
equal: true,
|
||||
},
|
||||
{
|
||||
name: "equal lists",
|
||||
leftTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
rightTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
equal: true,
|
||||
},
|
||||
{
|
||||
name: "non-equal lists",
|
||||
leftTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
rightTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
},
|
||||
},
|
||||
equal: false,
|
||||
},
|
||||
{
|
||||
name: "different sizes lists",
|
||||
leftTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
rightTolerations: []v1.Toleration{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value2",
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
},
|
||||
},
|
||||
equal: false,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
equal := TolerationsEqual(test.leftTolerations, test.rightTolerations)
|
||||
if equal != test.equal {
|
||||
t.Errorf("TolerationsEqual expected to be %v, got %v", test.equal, equal)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUniqueSortNodeSelectorRequirements(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
requirements []v1.NodeSelectorRequirement
|
||||
expectedRequirements []v1.NodeSelectorRequirement
|
||||
}{
|
||||
{
|
||||
name: "Identical requirements",
|
||||
requirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
expectedRequirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Sorted requirements",
|
||||
requirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v2"},
|
||||
},
|
||||
},
|
||||
expectedRequirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Sort values",
|
||||
requirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v2", "v1"},
|
||||
},
|
||||
},
|
||||
expectedRequirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Sort by key",
|
||||
requirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k3",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
},
|
||||
expectedRequirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k3",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Sort by operator",
|
||||
requirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpExists,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpGt,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
},
|
||||
expectedRequirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpExists,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpGt,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Sort by values",
|
||||
requirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v6", "v5"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v2", "v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v4", "v1"},
|
||||
},
|
||||
},
|
||||
expectedRequirements: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v4"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v5", "v6"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
resultRequirements := uniqueSortNodeSelectorRequirements(test.requirements)
|
||||
if !reflect.DeepEqual(resultRequirements, test.expectedRequirements) {
|
||||
t.Errorf("Requirements not sorted as expected, \n\tgot: %#v, \n\texpected: %#v", resultRequirements, test.expectedRequirements)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUniqueSortNodeSelectorTerms(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
terms []v1.NodeSelectorTerm
|
||||
expectedTerms []v1.NodeSelectorTerm
|
||||
}{
|
||||
{
|
||||
name: "Identical terms",
|
||||
terms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Sorted terms",
|
||||
terms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Sort terms",
|
||||
terms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v2", "v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v3", "v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v3"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Unique sort terms",
|
||||
terms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v2", "v1"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v2", "v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v2", "v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
resultTerms := uniqueSortNodeSelectorTerms(test.terms)
|
||||
if !reflect.DeepEqual(resultTerms, test.expectedTerms) {
|
||||
t.Errorf("Terms not sorted as expected, \n\tgot: %#v, \n\texpected: %#v", resultTerms, test.expectedTerms)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeSelectorTermsEqual(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
leftSelector, rightSelector v1.NodeSelector
|
||||
equal bool
|
||||
}{
|
||||
{
|
||||
name: "identical selectors",
|
||||
leftSelector: v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
rightSelector: v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
equal: true,
|
||||
},
|
||||
{
|
||||
name: "equal selectors",
|
||||
leftSelector: v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
rightSelector: v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
equal: true,
|
||||
},
|
||||
{
|
||||
name: "non-equal selectors in values",
|
||||
leftSelector: v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
rightSelector: v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
equal: false,
|
||||
},
|
||||
{
|
||||
name: "non-equal selectors in keys",
|
||||
leftSelector: v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k3",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
rightSelector: v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "k1",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
{
|
||||
Key: "k2",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"v1", "v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
equal: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
equal := NodeSelectorsEqual(&test.leftSelector, &test.rightSelector)
|
||||
if equal != test.equal {
|
||||
t.Errorf("NodeSelectorsEqual expected to be %v, got %v", test.equal, equal)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
90  pkg/version/version.go  Normal file
@@ -0,0 +1,90 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package version
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// version is a constant representing the version tag that
|
||||
// generated this build. It should be set during build via -ldflags.
|
||||
version string
|
||||
// buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
// It should be set during build via -ldflags.
|
||||
buildDate string
|
||||
// gitbranch is a constant representing git branch for this build.
|
||||
// It should be set during build via -ldflags.
|
||||
gitbranch string
|
||||
// gitsha1 is a constant representing the git sha1 for this build.
|
||||
// It should be set during build via -ldflags.
|
||||
gitsha1 string
|
||||
)
|
||||
|
||||
// Info holds the information related to descheduler app version.
|
||||
type Info struct {
|
||||
Major string `json:"major"`
|
||||
Minor string `json:"minor"`
|
||||
GitVersion string `json:"gitVersion"`
|
||||
GitBranch string `json:"gitBranch"`
|
||||
GitSha1 string `json:"gitSha1"`
|
||||
BuildDate string `json:"buildDate"`
|
||||
GoVersion string `json:"goVersion"`
|
||||
Compiler string `json:"compiler"`
|
||||
Platform string `json:"platform"`
|
||||
}
|
||||
|
||||
// Get returns the overall codebase version. It's for detecting
|
||||
// what code a binary was built from.
|
||||
func Get() Info {
|
||||
majorVersion, minorVersion := splitVersion(version)
|
||||
return Info{
|
||||
Major: majorVersion,
|
||||
Minor: minorVersion,
|
||||
GitVersion: version,
|
||||
GitBranch: gitbranch,
|
||||
GitSha1: gitsha1,
|
||||
BuildDate: buildDate,
|
||||
GoVersion: runtime.Version(),
|
||||
Compiler: runtime.Compiler,
|
||||
Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
|
||||
}
|
||||
}
|
||||
|
||||
// splitVersion splits the git version to generate major and minor versions needed.
|
||||
func splitVersion(version string) (string, string) {
|
||||
if version == "" {
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// Version from an automated container build environment for a tag. For example v20200521-v0.18.0.
|
||||
m1, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+$`, version)
|
||||
|
||||
// Version from an automated container build environment (not a tag) or a local build. For example v20201009-v0.18.0-46-g939c1c0.
|
||||
m2, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+-\w+-\w+$`, version)
|
||||
|
||||
if m1 || m2 {
|
||||
semVer := strings.Split(version, "-")[1]
|
||||
return strings.Trim(strings.Split(semVer, ".")[0], "v"), strings.Split(semVer, ".")[1] + "+"
|
||||
}
|
||||
|
||||
// Something went wrong
|
||||
return "", ""
|
||||
}
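// Illustrative only (not part of this file): how splitVersion maps the two
// supported -ldflags version formats onto Info.Major/Info.Minor.
func exampleSplitVersion() {
	major, minor := splitVersion("v20200521-v0.18.0") // tagged CI build
	fmt.Println(major, minor)                         // "0" "18+"

	major, minor = splitVersion("v20201009-v0.18.0-46-g939c1c0") // untagged or local build
	fmt.Println(major, minor)                                    // "0" "18+"
}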
|
||||
BIN  strategies_diagram.png  Normal file (binary file not shown; 85 KiB)
(File diff suppressed because it is too large.)
@@ -14,22 +14,24 @@
|
||||
|
||||
#!/bin/bash
|
||||
|
||||
# This just run e2e tests.
|
||||
set -x
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
|
||||
# This just runs e2e tests.
|
||||
if [ -n "$KIND_E2E" ]; then
|
||||
K8S_VERSION=${KUBERNETES_VERSION:-v1.18.2}
|
||||
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
|
||||
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.9.0/kind-linux-amd64
|
||||
K8S_VERSION=${KUBERNETES_VERSION:-v1.21.1}
|
||||
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
|
||||
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.11.0/kind-linux-amd64
|
||||
chmod +x kind-linux-amd64
|
||||
mv kind-linux-amd64 kind
|
||||
export PATH=$PATH:$PWD
|
||||
kind create cluster --image kindest/node:${K8S_VERSION} --config=./hack/kind_config.yaml
|
||||
export KUBECONFIG="$(kind get kubeconfig-path)"
|
||||
docker pull kubernetes/pause
|
||||
kind load docker-image kubernetes/pause
|
||||
kind get kubeconfig > /tmp/admin.conf
|
||||
export KUBECONFIG="/tmp/admin.conf"
|
||||
mkdir -p ~/gopath/src/sigs.k8s.io/
|
||||
mv ~/gopath/src/github.com/kubernetes-sigs/descheduler ~/gopath/src/sigs.k8s.io/.
|
||||
fi
|
||||
|
||||
PRJ_PREFIX="sigs.k8s.io/descheduler"
|
||||
|
||||
36  test/run-helm-tests.sh  Executable file
@@ -0,0 +1,36 @@
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
#!/bin/bash
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
K8S_VERSION=${KUBERNETES_VERSION:-v1.21.1}
|
||||
IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler}
|
||||
IMAGE_TAG=${HELM_IMAGE_TAG:-helm-test}
|
||||
CHART_LOCATION=${HELM_CHART_LOCATION:-./charts/descheduler}
|
||||
VERSION=helm-test make image
|
||||
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.11.0/kind-linux-amd64
|
||||
chmod +x kind-linux-amd64
|
||||
mv kind-linux-amd64 kind
|
||||
export PATH=$PATH:$PWD
|
||||
kind create cluster --image kindest/node:"${K8S_VERSION}" --config=./hack/kind_config.yaml
|
||||
kind load docker-image descheduler:helm-test
|
||||
helm install descheduler-ci --set image.repository="${IMAGE_REPO}",image.tag="${IMAGE_TAG}" --namespace kube-system "${CHART_LOCATION}"
|
||||
sleep 20s
|
||||
helm test descheduler-ci --namespace kube-system
|
||||
|
||||
# Delete kind cluster once test is finished
|
||||
kind delete cluster
|
||||
@@ -14,6 +14,10 @@
|
||||
|
||||
#!/bin/bash
|
||||
|
||||
set -x
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
|
||||
# This just runs unit tests, ignoring the current directory so as to avoid running e2e tests.
|
||||
PRJ_PREFIX="sigs.k8s.io/descheduler"
|
||||
go test $(go list ${PRJ_PREFIX}/... | grep -v ${PRJ_PREFIX}/vendor/| grep -v ${PRJ_PREFIX}/test/)
|
||||
|
||||
@@ -19,7 +19,7 @@ package test
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
@@ -79,6 +79,13 @@ func GetReplicaSetOwnerRefList() []metav1.OwnerReference {
|
||||
return ownerRefList
|
||||
}
|
||||
|
||||
// GetStatefulSetOwnerRefList returns the ownerRef needed for statefulset pod.
|
||||
func GetStatefulSetOwnerRefList() []metav1.OwnerReference {
|
||||
ownerRefList := make([]metav1.OwnerReference, 0)
|
||||
ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "StatefulSet", APIVersion: "v1", Name: "statefulset-1"})
|
||||
return ownerRefList
|
||||
}
|
||||
|
||||
// GetDaemonSetOwnerRefList returns the ownerRef needed for daemonset pod.
|
||||
func GetDaemonSetOwnerRefList() []metav1.OwnerReference {
|
||||
ownerRefList := make([]metav1.OwnerReference, 0)
|
||||
@@ -142,6 +149,11 @@ func SetRSOwnerRef(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = GetReplicaSetOwnerRefList()
|
||||
}
|
||||
|
||||
// SetSSOwnerRef sets the given pod's owner to StatefulSet
|
||||
func SetSSOwnerRef(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = GetStatefulSetOwnerRefList()
|
||||
}
|
||||
|
||||
// SetDSOwnerRef sets the given pod's owner to DaemonSet
|
||||
func SetDSOwnerRef(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = GetDaemonSetOwnerRefList()
|
||||
@@ -161,3 +173,14 @@ func SetPodPriority(pod *v1.Pod, priority int32) {
|
||||
func SetNodeUnschedulable(node *v1.Node) {
|
||||
node.Spec.Unschedulable = true
|
||||
}
|
||||
|
||||
// SetPodExtendedResourceRequest sets the given pod's extended resources
|
||||
func SetPodExtendedResourceRequest(pod *v1.Pod, resourceName v1.ResourceName, requestQuantity int64) {
|
||||
pod.Spec.Containers[0].Resources.Requests[resourceName] = *resource.NewQuantity(requestQuantity, resource.DecimalSI)
|
||||
}
|
||||
|
||||
// SetNodeExtendedResource sets the given node's extended resources
|
||||
func SetNodeExtendedResource(node *v1.Node, resourceName v1.ResourceName, requestQuantity int64) {
|
||||
node.Status.Capacity[resourceName] = *resource.NewQuantity(requestQuantity, resource.DecimalSI)
|
||||
node.Status.Allocatable[resourceName] = *resource.NewQuantity(requestQuantity, resource.DecimalSI)
|
||||
}
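// Illustrative only (not part of this change): pairing the two helpers above to
// model a pod requesting a node-level extended resource. The resource name is
// hypothetical, and BuildTestNode/BuildTestPod are the builders from this package.
func exampleExtendedResource() {
	node := BuildTestNode("n1", 2000, 3000, 10, func(*v1.Node) {})
	pod := BuildTestPod("p1", 100, 0, "n1", func(*v1.Pod) {})
	SetNodeExtendedResource(node, "example.com/gpu", 4)      // node offers 4 units
	SetPodExtendedResourceRequest(pod, "example.com/gpu", 1) // pod requests 1 unit
	_, _ = node, pod
}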
|
||||
|
||||
17  vendor/github.com/Azure/go-autorest/autorest/authorization.go  generated vendored
@@ -299,18 +299,24 @@ type MultiTenantServicePrincipalTokenAuthorizer interface {
|
||||
|
||||
// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider
|
||||
func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
|
||||
return &multiTenantSPTAuthorizer{tp: tp}
|
||||
return NewMultiTenantBearerAuthorizer(tp)
|
||||
}
|
||||
|
||||
type multiTenantSPTAuthorizer struct {
|
||||
// MultiTenantBearerAuthorizer implements bearer authorization across multiple tenants.
|
||||
type MultiTenantBearerAuthorizer struct {
|
||||
tp adal.MultitenantOAuthTokenProvider
|
||||
}
|
||||
|
||||
// NewMultiTenantBearerAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider.
|
||||
func NewMultiTenantBearerAuthorizer(tp adal.MultitenantOAuthTokenProvider) *MultiTenantBearerAuthorizer {
|
||||
return &MultiTenantBearerAuthorizer{tp: tp}
|
||||
}
|
||||
|
||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
|
||||
// primary token along with the auxiliary authorization header using the auxiliary tokens.
|
||||
//
|
||||
// By default, the token will be automatically refreshed through the Refresher interface.
|
||||
func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
func (mt *MultiTenantBearerAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
@@ -340,3 +346,8 @@ func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TokenProvider returns the underlying MultitenantOAuthTokenProvider for this authorizer.
|
||||
func (mt *MultiTenantBearerAuthorizer) TokenProvider() adal.MultitenantOAuthTokenProvider {
|
||||
return mt.tp
|
||||
}
|
||||
|
||||
7  vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go  generated vendored
@@ -54,13 +54,12 @@ func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
return r, err
|
||||
}
|
||||
|
||||
if r.URL.RawQuery != "" {
|
||||
r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
|
||||
} else {
|
||||
if r.URL.RawQuery == "" {
|
||||
r.URL.RawQuery = sas.sasToken
|
||||
} else if !strings.Contains(r.URL.RawQuery, sas.sasToken) {
|
||||
r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
|
||||
}
|
||||
|
||||
r.RequestURI = r.URL.String()
|
||||
return Prepare(r)
|
||||
})
|
||||
}
|
||||
|
||||
3  vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go  generated vendored
@@ -152,6 +152,9 @@ func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType)
|
||||
// the resource's URI should be encoded exactly as it is in the URI.
|
||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
||||
cr.WriteString(u.EscapedPath())
|
||||
} else {
|
||||
// a slash is required to indicate the root path
|
||||
cr.WriteString("/")
|
||||
}
|
||||
|
||||
params, err := url.ParseQuery(u.RawQuery)
|
||||
|
||||
11  vendor/github.com/Azure/go-autorest/autorest/azure/async.go  generated vendored
@@ -413,12 +413,12 @@ func (pt *pollingTrackerBase) updateRawBody() error {
|
||||
if err != nil {
|
||||
return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
|
||||
}
|
||||
// put the body back so it's available to other callers
|
||||
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
// observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
|
||||
if len(b) == 0 {
|
||||
return nil
|
||||
}
|
||||
// put the body back so it's available to other callers
|
||||
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
if err = json.Unmarshal(b, &pt.rawBody); err != nil {
|
||||
return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body")
|
||||
}
|
||||
@@ -466,7 +466,12 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
|
||||
re := respErr{}
|
||||
defer pt.resp.Body.Close()
|
||||
var b []byte
|
||||
if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 {
|
||||
if b, err = ioutil.ReadAll(pt.resp.Body); err != nil {
|
||||
goto Default
|
||||
}
|
||||
// put the body back so it's available to other callers
|
||||
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
if len(b) == 0 {
|
||||
goto Default
|
||||
}
|
||||
if err = json.Unmarshal(b, &re); err != nil {
|
||||
|
||||
5  vendor/github.com/Azure/go-autorest/autorest/azure/azure.go  generated vendored
@@ -171,6 +171,11 @@ type Resource struct {
|
||||
ResourceName string
|
||||
}
|
||||
|
||||
// String function returns a string in form of azureResourceID
|
||||
func (r Resource) String() string {
|
||||
return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
|
||||
}
|
||||
|
||||
// ParseResourceID parses a resource ID into a ResourceDetails struct.
|
||||
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
|
||||
func ParseResourceID(resourceID string) (Resource, error) {
|
||||
|
||||
20  vendor/github.com/Azure/go-autorest/autorest/azure/environments.go  generated vendored
@@ -46,6 +46,8 @@ type ResourceIdentifier struct {
|
||||
Batch string `json:"batch"`
|
||||
OperationalInsights string `json:"operationalInsights"`
|
||||
Storage string `json:"storage"`
|
||||
Synapse string `json:"synapse"`
|
||||
ServiceBus string `json:"serviceBus"`
|
||||
}
|
||||
|
||||
// Environment represents a set of endpoints for each of Azure's Clouds.
|
||||
@@ -71,6 +73,8 @@ type Environment struct {
|
||||
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
|
||||
CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
|
||||
TokenAudience string `json:"tokenAudience"`
|
||||
APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"`
|
||||
SynapseEndpointSuffix string `json:"synapseEndpointSuffix"`
|
||||
ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"`
|
||||
}
|
||||
|
||||
@@ -98,6 +102,8 @@ var (
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
CosmosDBDNSSuffix: "documents.azure.com",
|
||||
TokenAudience: "https://management.azure.com/",
|
||||
APIManagementHostNameSuffix: "azure-api.net",
|
||||
SynapseEndpointSuffix: "dev.azuresynapse.net",
|
||||
ResourceIdentifiers: ResourceIdentifier{
|
||||
Graph: "https://graph.windows.net/",
|
||||
KeyVault: "https://vault.azure.net",
|
||||
@@ -105,6 +111,8 @@ var (
|
||||
Batch: "https://batch.core.windows.net/",
|
||||
OperationalInsights: "https://api.loganalytics.io",
|
||||
Storage: "https://storage.azure.com/",
|
||||
Synapse: "https://dev.azuresynapse.net",
|
||||
ServiceBus: "https://servicebus.azure.net/",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -131,6 +139,8 @@ var (
|
||||
ContainerRegistryDNSSuffix: "azurecr.us",
|
||||
CosmosDBDNSSuffix: "documents.azure.us",
|
||||
TokenAudience: "https://management.usgovcloudapi.net/",
|
||||
APIManagementHostNameSuffix: "azure-api.us",
|
||||
SynapseEndpointSuffix: NotAvailable,
|
||||
ResourceIdentifiers: ResourceIdentifier{
|
||||
Graph: "https://graph.windows.net/",
|
||||
KeyVault: "https://vault.usgovcloudapi.net",
|
||||
@@ -138,6 +148,8 @@ var (
|
||||
Batch: "https://batch.core.usgovcloudapi.net/",
|
||||
OperationalInsights: "https://api.loganalytics.us",
|
||||
Storage: "https://storage.azure.com/",
|
||||
Synapse: NotAvailable,
|
||||
ServiceBus: "https://servicebus.azure.net/",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -164,6 +176,8 @@ var (
|
||||
ContainerRegistryDNSSuffix: "azurecr.cn",
|
||||
CosmosDBDNSSuffix: "documents.azure.cn",
|
||||
TokenAudience: "https://management.chinacloudapi.cn/",
|
||||
APIManagementHostNameSuffix: "azure-api.cn",
|
||||
SynapseEndpointSuffix: "dev.azuresynapse.azure.cn",
|
||||
ResourceIdentifiers: ResourceIdentifier{
|
||||
Graph: "https://graph.chinacloudapi.cn/",
|
||||
KeyVault: "https://vault.azure.cn",
|
||||
@@ -171,6 +185,8 @@ var (
|
||||
Batch: "https://batch.chinacloudapi.cn/",
|
||||
OperationalInsights: NotAvailable,
|
||||
Storage: "https://storage.azure.com/",
|
||||
Synapse: "https://dev.azuresynapse.net",
|
||||
ServiceBus: "https://servicebus.azure.net/",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -197,6 +213,8 @@ var (
|
||||
ContainerRegistryDNSSuffix: NotAvailable,
|
||||
CosmosDBDNSSuffix: "documents.microsoftazure.de",
|
||||
TokenAudience: "https://management.microsoftazure.de/",
|
||||
APIManagementHostNameSuffix: NotAvailable,
|
||||
SynapseEndpointSuffix: NotAvailable,
|
||||
ResourceIdentifiers: ResourceIdentifier{
|
||||
Graph: "https://graph.cloudapi.de/",
|
||||
KeyVault: "https://vault.microsoftazure.de",
|
||||
@@ -204,6 +222,8 @@ var (
|
||||
Batch: "https://batch.cloudapi.de/",
|
||||
OperationalInsights: NotAvailable,
|
||||
Storage: "https://storage.azure.com/",
|
||||
Synapse: NotAvailable,
|
||||
ServiceBus: "https://servicebus.azure.net/",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
6  vendor/github.com/Azure/go-autorest/autorest/go.mod  generated vendored
@@ -4,9 +4,9 @@ go 1.12

require (
	github.com/Azure/go-autorest v14.2.0+incompatible
	github.com/Azure/go-autorest/autorest/adal v0.9.0
	github.com/Azure/go-autorest/autorest/mocks v0.4.0
	github.com/Azure/go-autorest/autorest/adal v0.9.5
	github.com/Azure/go-autorest/autorest/mocks v0.4.1
	github.com/Azure/go-autorest/logger v0.2.0
	github.com/Azure/go-autorest/tracing v0.6.0
	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
	golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
)
vendor/github.com/Azure/go-autorest/autorest/go.sum (generated, vendored): 16 changes
@@ -1,21 +1,21 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
vendor/github.com/Azure/go-autorest/autorest/preparer.go (generated, vendored): 29 changes
@@ -127,10 +127,7 @@ func WithHeader(header string, value string) PrepareDecorator {
	return PreparerFunc(func(r *http.Request) (*http.Request, error) {
		r, err := p.Prepare(r)
		if err == nil {
			if r.Header == nil {
				r.Header = make(http.Header)
			}
			r.Header.Set(http.CanonicalHeaderKey(header), value)
			setHeader(r, http.CanonicalHeaderKey(header), value)
		}
		return r, err
	})
@@ -230,7 +227,7 @@ func AsPost() PrepareDecorator { return WithMethod("POST") }
func AsPut() PrepareDecorator { return WithMethod("PUT") }

// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
// from the supplied baseUrl.
// from the supplied baseUrl. Query parameters will be encoded as required.
func WithBaseURL(baseURL string) PrepareDecorator {
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
@@ -241,11 +238,16 @@ func WithBaseURL(baseURL string) PrepareDecorator {
				return r, err
			}
			if u.Scheme == "" {
				err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
				return r, fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
			}
			if err == nil {
				r.URL = u
			if u.RawQuery != "" {
				q, err := url.ParseQuery(u.RawQuery)
				if err != nil {
					return r, err
				}
				u.RawQuery = q.Encode()
			}
			r.URL = u
			}
			return r, err
		})
@@ -290,10 +292,7 @@ func WithFormData(v url.Values) PrepareDecorator {
			if err == nil {
				s := v.Encode()

				if r.Header == nil {
					r.Header = make(http.Header)
				}
				r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
				setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
				r.ContentLength = int64(len(s))
				r.Body = ioutil.NopCloser(strings.NewReader(s))
			}
@@ -329,10 +328,7 @@ func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDec
			if err = writer.Close(); err != nil {
				return r, err
			}
			if r.Header == nil {
				r.Header = make(http.Header)
			}
			r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
			setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
			r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
			r.ContentLength = int64(body.Len())
			return r, err
@@ -437,6 +433,7 @@ func WithXML(v interface{}) PrepareDecorator {
			bytesWithHeader := []byte(withHeader)

			r.ContentLength = int64(len(bytesWithHeader))
			setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader)))
			r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
		}
	}
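The hunks above swap the repeated nil-Header checks for the shared setHeader helper and teach WithBaseURL to re-encode query parameters. As a rough usage sketch of how these PrepareDecorators compose, assuming only the exported autorest API visible in this diff (the URL and "x-ms-example" header name are made up for illustration):

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Build a Preparer from decorators; WithBaseURL parses (and now re-encodes)
	// the URL, and WithHeader lazily allocates r.Header via setHeader.
	p := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com/subscriptions?api-version=2020-06-01"),
		autorest.WithHeader("x-ms-example", "demo"), // hypothetical header
	)
	req, err := p.Prepare(&http.Request{})
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.String(), req.Header.Get("x-ms-example"))
}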
vendor/github.com/Azure/go-autorest/autorest/sender.go (generated, vendored): 63 changes
@@ -23,11 +23,29 @@ import (
	"net/http"
	"net/http/cookiejar"
	"strconv"
	"sync"
	"time"

	"github.com/Azure/go-autorest/tracing"
)

// there is one sender per TLS renegotiation type, i.e. count of tls.RenegotiationSupport enums
const defaultSendersCount = 3

type defaultSender struct {
	sender Sender
	init *sync.Once
}

// each type of sender will be created on demand in sender()
var defaultSenders [defaultSendersCount]defaultSender

func init() {
	for i := 0; i < defaultSendersCount; i++ {
		defaultSenders[i].init = &sync.Once{}
	}
}

// used as a key type in context.WithValue()
type ctxSendDecorators struct{}

@@ -107,26 +125,31 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht
}

func sender(renengotiation tls.RenegotiationSupport) Sender {
	// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
	defaultTransport := http.DefaultTransport.(*http.Transport)
	transport := &http.Transport{
		Proxy: defaultTransport.Proxy,
		DialContext: defaultTransport.DialContext,
		MaxIdleConns: defaultTransport.MaxIdleConns,
		IdleConnTimeout: defaultTransport.IdleConnTimeout,
		TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
		ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
		TLSClientConfig: &tls.Config{
			MinVersion: tls.VersionTLS12,
			Renegotiation: renengotiation,
		},
	}
	var roundTripper http.RoundTripper = transport
	if tracing.IsEnabled() {
		roundTripper = tracing.NewTransport(transport)
	}
	j, _ := cookiejar.New(nil)
	return &http.Client{Jar: j, Transport: roundTripper}
	// note that we can't init defaultSenders in init() since it will
	// execute before calling code has had a chance to enable tracing
	defaultSenders[renengotiation].init.Do(func() {
		// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
		defaultTransport := http.DefaultTransport.(*http.Transport)
		transport := &http.Transport{
			Proxy: defaultTransport.Proxy,
			DialContext: defaultTransport.DialContext,
			MaxIdleConns: defaultTransport.MaxIdleConns,
			IdleConnTimeout: defaultTransport.IdleConnTimeout,
			TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
			ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS12,
				Renegotiation: renengotiation,
			},
		}
		var roundTripper http.RoundTripper = transport
		if tracing.IsEnabled() {
			roundTripper = tracing.NewTransport(transport)
		}
		j, _ := cookiejar.New(nil)
		defaultSenders[renengotiation].sender = &http.Client{Jar: j, Transport: roundTripper}
	})
	return defaultSenders[renengotiation].sender
}

// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
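The rewritten sender() caches one client per tls.RenegotiationSupport value and defers construction with sync.Once, so tracing can be enabled before the transport is built. A standalone sketch of that lazy, once-per-key pattern; this is not the library's exported API, and the names here are illustrative only:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"sync"
)

// lazyClient mirrors the pattern in sender.go: one cached *http.Client per
// tls.RenegotiationSupport value, built on first use.
type lazyClient struct {
	client *http.Client
	once   sync.Once
}

var clients [3]lazyClient // one slot per tls.RenegotiationSupport enum value

func clientFor(r tls.RenegotiationSupport) *http.Client {
	clients[r].once.Do(func() {
		clients[r].client = &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					MinVersion:    tls.VersionTLS12,
					Renegotiation: r,
				},
			},
		}
	})
	return clients[r].client
}

func main() {
	a := clientFor(tls.RenegotiateNever)
	b := clientFor(tls.RenegotiateNever)
	fmt.Println(a == b) // true: the client is built once and then reused
}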
vendor/github.com/Azure/go-autorest/autorest/utility.go (generated, vendored): 7 changes
@@ -237,3 +237,10 @@ func DrainResponseBody(resp *http.Response) error {
	}
	return nil
}

func setHeader(r *http.Request, key, value string) {
	if r.Header == nil {
		r.Header = make(http.Header)
	}
	r.Header.Set(key, value)
}
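The nil-Header guard in setHeader matters because a zero-value http.Request has a nil Header map, and calling Set on it would otherwise panic with "assignment to entry in nil map". A tiny self-contained sketch; the helper body is copied from the hunk above, and main is illustrative only:

package main

import (
	"fmt"
	"net/http"
)

// setHeader allocates the Header map on first use before setting the value.
func setHeader(r *http.Request, key, value string) {
	if r.Header == nil {
		r.Header = make(http.Header)
	}
	r.Header.Set(key, value)
}

func main() {
	r := &http.Request{} // zero value: r.Header is a nil map
	setHeader(r, "Content-Type", "application/json")
	fmt.Println(r.Header.Get("Content-Type"))
}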
vendor/github.com/BurntSushi/toml/.gitignore (generated, vendored, new file): 5 changes
@@ -0,0 +1,5 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test
vendor/github.com/BurntSushi/toml/.travis.yml (generated, vendored, new file): 15 changes
@@ -0,0 +1,15 @@
language: go
go:
  - 1.1
  - 1.2
  - 1.3
  - 1.4
  - 1.5
  - 1.6
  - tip
install:
  - go install ./...
  - go get github.com/BurntSushi/toml-test
script:
  - export PATH="$PATH:$HOME/gopath/bin"
  - make test
vendor/github.com/BurntSushi/toml/COMPATIBLE (generated, vendored, new file): 3 changes
@@ -0,0 +1,3 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
vendor/github.com/BurntSushi/toml/COPYING (generated, vendored, new file): 21 changes
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2013 TOML authors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
vendor/github.com/BurntSushi/toml/Makefile (generated, vendored, new file): 19 changes
@@ -0,0 +1,19 @@
install:
	go install ./...

test: install
	go test -v
	toml-test toml-test-decoder
	toml-test -encoder toml-test-encoder

fmt:
	gofmt -w *.go */*.go
	colcheck *.go */*.go

tags:
	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS

push:
	git push origin master
	git push github master
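For completeness, the newly vendored BurntSushi/toml package (TOML v0.4.0 compatible, per the COMPATIBLE file above) is typically consumed through toml.Decode. A minimal sketch with a made-up config struct, not code from this change set:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// config is a hypothetical struct used only to illustrate decoding.
type config struct {
	Name     string
	Replicas int
}

func main() {
	var c config
	// TOML keys are matched to struct fields case-insensitively by default.
	if _, err := toml.Decode("name = \"descheduler\"\nreplicas = 2\n", &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:descheduler Replicas:2}
}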
Some files were not shown because too many files have changed in this diff.