mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Compare commits


71 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
9c110c4004 Merge pull request #791 from JaneLiuL/master
Bump to k8s 1.24.0
2022-05-12 12:06:33 -07:00
Kubernetes Prow Robot
0eddf7f108 Merge pull request #792 from pravarag/update-docs-1.24
Update Docs and Manifests for v0.24.0
2022-05-12 11:31:15 -07:00
Kubernetes Prow Robot
3c8d6c4d53 Merge pull request #795 from damemi/update-e2e
Update e2e test versions
2022-05-12 09:29:14 -07:00
Mike Dame
6e84d0a6ba React to removal of offensive language
https://github.com/kubernetes/kubeadm/issues/2200 went into effect in 1.24, so
e2es broke without the update.
2022-05-12 15:35:07 +00:00
Mike Dame
fb1df468ad golint fix 2022-05-12 14:21:34 +00:00
Mike Dame
ac4d576df8 Update e2e test versions 2022-05-12 14:16:53 +00:00
Pravar Agrawal
314ad65b04 Update docs and manifests for v0.24.0 2022-05-04 22:08:49 +05:30
JaneLiuL
969a618933 Bump to k8s 1.24.0 2022-05-04 10:17:47 +08:00
Kubernetes Prow Robot
028f205e8c Merge pull request #790 from ingvagabund/636
Added request considerations to NodeFit Feature [#636 follow up]
2022-05-03 19:09:16 -07:00
Jan Chaloupka
3eca2782d4 Addressing review comments
Both LowNode and HighNode utilization strategies evict only as many pods
as there's free resources on other nodes. Thus, the resource fit test
is always true by definition.
2022-04-28 18:54:54 +02:00
RyanDevlin
16eb9063b6 NodeFit parameter now considers pod requests 2022-04-28 10:16:52 +02:00
Kubernetes Prow Robot
eac3b4b54a Merge pull request #788 from ryan4yin/master
fix: incorrect yaml indentation in readme
2022-04-26 06:46:53 -07:00
Ryan Yin
d08cea731a fix: incorrect indentation 2022-04-26 06:05:12 +08:00
Kubernetes Prow Robot
0fc5ba9316 Merge pull request #787 from JaneLiuL/master
bump to k8s 1.24-rc.0
2022-04-25 12:05:43 -07:00
JaneLiuL
ecbd10afe2 bump to k8s 1.24-rc.0 2022-04-21 09:11:04 +08:00
Kubernetes Prow Robot
e5ed0540f2 Merge pull request #779 from pravarag/user-docs-typo
Fix missing param in user-guide for PodLifeTime strategy
2022-04-11 01:44:06 -07:00
Pravar Agrawal
4e972a7602 fix missing param in user-guide 2022-04-07 10:02:26 +05:30
Kubernetes Prow Robot
ae20b5b034 Merge pull request #732 from eminaktas/feature/metric-scape
feat: Add metric scrape configs in Helm Chart
2022-03-30 07:06:27 -07:00
Kubernetes Prow Robot
406e3ed5b3 Merge pull request #771 from dineshbhor/fix-highnodeutilization-node-sorting
Sort nodes in ascending order for HighNodeUtilization
2022-03-29 02:58:47 -07:00
dineshbhor
7589aaf00b Sort nodes in ascending order for HighNodeUtilization 2022-03-29 17:54:18 +09:00
eminaktas
ca90b53913 feat: Add metric scrape configs in Helm Chart
Signed-off-by: eminaktas <emin.aktas@trendyol.com>
2022-03-28 23:41:56 +03:00
Kubernetes Prow Robot
238eebeaca Merge pull request #722 from Dentrax/feature/leaderelection
feat(leaderelection): impl leader election for HA Deployment
2022-03-28 09:39:23 -07:00
Kubernetes Prow Robot
cf59d08193 Merge pull request #751 from HelmutLety/redo_#473
feat: Add DeviationThreshold Parameter for LowNodeUtilization (Previous attempt - #473)
2022-03-28 03:53:24 -07:00
HelmutLety
2ea65e69dc feat(LowNodeUtilization): useDeviationThresholds, redo of #473
[751]: normalize Percentage in nodeutilization and clean the tests
2022-03-28 12:35:01 +02:00
Kubernetes Prow Robot
7f6a2a69b0 Merge pull request #777 from JacobHenner/support-taint-exclusions
Add RemovePodsViolatingNodeTaints taint exclusion
2022-03-28 02:47:23 -07:00
Jacob Henner
ac3362149b Add RemovePodsViolatingNodeTaints taint exclusion
Add taint exclusion to RemovePodsViolatingNodeTaints. This permits node
taints to be ignored by allowing users to specify ignored taint keys or
ignored taint key=value pairs.
2022-03-27 13:48:40 -04:00
Furkan
0a52af9ab8 feat(leaderelection): impl leader election
Signed-off-by: Furkan <furkan.turkal@trendyol.com>
Signed-off-by: eminaktas <eminaktas34@gmail.com>
Co-authored-by: Emin <emin.aktas@trendyol.com>
Co-authored-by: Yasin <yasintaha.erol@trendyol.com>
2022-03-25 14:33:14 +03:00
Kubernetes Prow Robot
07bbdc61c4 Merge pull request #762 from ingvagabund/nodeutilization-refactor
Promote NodeUsage to NodeInfo, evaluate thresholds separately
2022-03-15 17:33:48 -07:00
Kubernetes Prow Robot
17595fdcfc Merge pull request #764 from ingvagabund/taints-prefer-no-scheduler
RemovePodsViolatingNodeTaints: optionally include PreferNoSchedule taint
2022-03-14 17:36:10 -07:00
Jan Chaloupka
285523f0d9 RemovePodsViolatingNodeTaints: optionally include PreferNoSchedule taint 2022-03-14 16:46:03 +01:00
Kubernetes Prow Robot
c55a897599 Merge pull request #759 from JaneLiuL/master
OWNERS: add janeliul as a reviewer
2022-03-11 10:29:07 -08:00
Jan Chaloupka
52ff50f2d1 Promote NodeUsage to NodeInfo, evaluate thresholds separately 2022-03-11 13:52:37 +01:00
Jan Chaloupka
8ebf3fb323 nodeutilization: move node resource threshold value computation under a separate function 2022-03-11 12:46:11 +01:00
Kubernetes Prow Robot
0e0ae8df90 Merge pull request #761 from ingvagabund/TestTooManyRestarts-II
[e2e] TestTooManyRestarts: check if container status is set before accessing
2022-03-11 02:29:06 -08:00
Jan Chaloupka
bd3daa82d3 [e2e] TestTooManyRestarts: check if container status is set before accessing 2022-03-11 10:35:49 +01:00
Kubernetes Prow Robot
60a15f0392 Merge pull request #760 from ingvagabund/TestTooManyRestarts
[e2e] TestTooManyRestarts: check err and len before accessing pod items
2022-03-11 01:09:07 -08:00
Jan Chaloupka
d98cb84568 [e2e] TestTooManyRestarts: check err and len before accessing pod items 2022-03-11 09:45:05 +01:00
Kubernetes Prow Robot
6ab01eca63 Merge pull request #758 from hiroyaonoe/add-doc-about-max-no-of-pods-to-evict-per-namespace-policy
Update docs for maxNoOfPodsToEvictPerNamespace
2022-03-10 11:25:21 -08:00
Kubernetes Prow Robot
584ac2d604 Merge pull request #757 from prune998/prune/taint-logs
add conflicting taint to the logs
2022-03-10 05:37:35 -08:00
prune
448dc4784c add conflicting taint to the logs
log when count mismatch

simplified logic to log blocking taints
2022-03-10 08:05:42 -05:00
JaneLiuL
3ca77e7a3d OWNERS: add janeliul as a reviewer 2022-03-08 07:48:11 +08:00
Hiroya Onoe
01e7015b97 Update docs for maxNoOfPodsToEvictPerNamespace 2022-03-07 16:21:04 +09:00
Kubernetes Prow Robot
fd5a8c7d78 Merge pull request #739 from JaneLiuL/master
Share links to all descheduler enhancements proposals in the project repo
2022-03-02 09:55:14 -08:00
Kubernetes Prow Robot
43148ecd0c Merge pull request #740 from JaneLiuL/doc-npd
fix doc about NPD description
2022-03-01 09:59:55 -08:00
Kubernetes Prow Robot
16501978dc Merge pull request #748 from damemi/update-v0.23.1
Update manifests and doc for v0.23.1
2022-03-01 07:47:46 -08:00
Mike Dame
1b4e48b006 Update manifests and doc for v0.23.1 2022-02-28 19:06:50 +00:00
Kubernetes Prow Robot
da6a3e063f Merge pull request #744 from antonio-te/master
Update golang image
2022-02-28 10:41:46 -08:00
Antonio Gurgel
5784c0cc04 Update golang image
1.17.3 is affected by CVE-2021-44716.
2022-02-28 07:22:26 -08:00
JaneLiuL
254a3a9ec1 Share links to all descheduler enhancements proposals in the project repository 2022-02-26 12:27:35 +08:00
JaneLiuL
328c695141 fix doc about NPD description 2022-02-26 12:23:33 +08:00
Kubernetes Prow Robot
3ab0268c5a Merge pull request #733 from JaneLiuL/master
remove MostRequestedPriority from doc since already deprecated
2022-02-24 04:32:32 -08:00
Jane Liu L
cd8dbdd1e2 remove MostRequestedPriority from doc since already deprecated 2022-02-24 09:00:36 +08:00
Kubernetes Prow Robot
54c50c5390 Merge pull request #731 from jklaw90/fix-ctx-cron
Bugfix: Cronjob ctx cancel
2022-02-22 11:35:18 -08:00
Julian Lawrence
a2cbc25397 updated to handle cronjob flow 2022-02-22 08:52:06 -08:00
Kubernetes Prow Robot
bd81f6436e Merge pull request #708 from damemi/utilization-values-readme
Clarify resource calculations in NodeUtilization strategy Readmes
2022-02-22 04:47:46 -08:00
Kubernetes Prow Robot
30be19b04e Merge pull request #715 from eminaktas/values-fix
fix: Remove deprecated parameters from cmdOptions and add the parameters under policy
2022-02-18 05:08:23 -08:00
Kubernetes Prow Robot
3c251fb09d Merge pull request #726 from jklaw90/log-eviction-node
Eviction Logs
2022-02-15 04:08:03 -08:00
Julian Lawrence
224e2b078f updated logs to help with debugging 2022-02-14 18:27:53 -08:00
Kubernetes Prow Robot
dd80d60f4f Merge pull request #716 from eminaktas/imagepullsecret
fix: add imagePullSecrets for deployment resource
2022-02-14 05:27:29 -08:00
Kubernetes Prow Robot
e88837a349 Merge pull request #704 from ingvagabund/update-chart-readme
Update charts README to reflect the new parameters
2022-02-11 14:23:46 -08:00
Kubernetes Prow Robot
5901f8af1b Merge pull request #697 from a7i/code-reviewer
OWNERS: add a7i as a reviewer
2022-02-11 08:14:23 -08:00
Kubernetes Prow Robot
0d1704a192 Merge pull request #717 from JaneLiuL/release-1.23.1
[release-1.23.1] Update helm chart version to v0.23.1
2022-02-08 04:34:54 -08:00
JaneLiuL
c5878b18c6 Update helm chart version to v0.23.1 2022-02-08 20:21:57 +08:00
emin.aktas
ff1954b32e fix: add imagePullSecrets for deployment resource
Signed-off-by: emin.aktas <eminaktas34@gmail.com>
Co-authored-by: yasintahaerol <yasintahaerol@gmail.com>
Co-authored-by: Dentrax <furkan.turkal@trendyol.com>
2022-02-07 18:05:18 +03:00
emin.aktas
4c8040bbaf fix: Remove deprecated parameters from cmdOptions and add the parameters under policy 2022-02-07 15:14:55 +03:00
Kubernetes Prow Robot
deaa314492 Merge pull request #712 from JaneLiuL/helm
fix helmchart fail to watch namespace issue
2022-02-06 10:36:51 -08:00
Jane Liu L
9c653a2274 fix helmchart fail to watch namespace issue 2022-02-04 18:34:21 +08:00
Kubernetes Prow Robot
8d37557743 Merge pull request #709 from damemi/update-helm-23
Update helm chart version to v0.23
2022-02-03 12:10:58 -08:00
Mike Dame
c51c066cd1 Clarify resource calculations in NodeUtilization strategy Readmes
This adds text explaining the resource calculation in LowNodeUtilization and HighNodeUtilization
2022-01-30 12:59:47 -05:00
Jan Chaloupka
90e6174fdd Update charts README to reflect the new parameters 2022-01-27 14:46:15 +01:00
Amir Alavi
0251935268 OWNERS: add a7i as a reviewer 2022-01-18 09:14:44 -05:00
1017 changed files with 36088 additions and 29823 deletions


@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.17.3
+FROM golang:1.17.7
 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .


@@ -24,7 +24,7 @@ ARCHS = amd64 arm arm64
 LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
-GOLANGCI_VERSION := v1.43.0
+GOLANGCI_VERSION := v1.46.1
 HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
 # REGISTRY is the container registry to push
@@ -144,4 +144,4 @@ test-helm: ensure-helm-install
 ensure-helm-install:
 ifndef HAS_HELM
 curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
 endif

OWNERS

@@ -10,6 +10,8 @@ reviewers:
 - seanmalloy
 - ingvagabund
 - lixiang233
+- a7i
+- janeliul
 emeritus_approvers:
 - aveshagarwal
 - k82cn

README.md

@@ -50,6 +50,8 @@ Table of Contents
 - [Node Fit filtering](#node-fit-filtering)
 - [Pod Evictions](#pod-evictions)
 - [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
+- [High Availability](#high-availability)
+- [Configure HA Mode](#configure-ha-mode)
 - [Metrics](#metrics)
 - [Compatibility Matrix](#compatibility-matrix)
 - [Getting Involved and Contributing](#getting-involved-and-contributing)
@@ -103,17 +105,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
 Run As A Job
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.22.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.24.0' | kubectl apply -f -
 ```
 Run As A CronJob
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.22.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.24.0' | kubectl apply -f -
 ```
 Run As A Deployment
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.22.0' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.24.0' | kubectl apply -f -
 ```
 ## User Guide
@@ -132,6 +134,7 @@ The policy includes a common configuration that applies to all the strategies:
 | `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
 | `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
 | `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
+| `maxNoOfPodsToEvictPerNamespace` | `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
 | `evictFailedBarePods` | `false` | allow eviction of pods without owner references and in failed phase |
 As part of the policy, the parameters associated with each strategy can be configured.
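(Editorial illustration, not part of the diff: the `maxNoOfPodsToEvictPerNamespace` option added above is a top-level policy field alongside `maxNoOfPodsToEvictPerNode`. A rough sketch of a policy capping evictions per namespace could look like the following; the strategy choice and numbers are illustrative assumptions.)

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
# cap evictions at 10 pods per namespace, summed across all enabled strategies
maxNoOfPodsToEvictPerNamespace: 10
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400   # evict pods older than 24h
```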
@@ -218,6 +221,17 @@ These thresholds, `thresholds` and `targetThresholds`, could be tuned as per you
 strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
 (those with usage below `thresholds`), it will abort if any number of `underutilized nodes` or `overutilized nodes` is zero.
+Additionally, the strategy accepts a `useDeviationThresholds` parameter.
+If that parameter is set to `true`, the thresholds are considered as percentage deviations from mean resource usage.
+`thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
+A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
+**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
+This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
+design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
+like `kubectl top`) may differ from the calculated consumption, due to these components reporting
+actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
 **Parameters:**
 |Name|Type|
@@ -225,6 +239,7 @@ strategy evicts pods from `overutilized nodes` (those with usage above `targetTh
 |`thresholds`|map(string:int)|
 |`targetThresholds`|map(string:int)|
 |`numberOfNodes`|int|
+|`useDeviationThresholds`|bool|
 |`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
 |`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
 |`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
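(Editorial illustration, not part of the diff: to make the `useDeviationThresholds` behaviour described above concrete, a sketch of a LowNodeUtilization configuration using it might look like this; the percentages are illustrative assumptions, not recommendations.)

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        useDeviationThresholds: true
        thresholds:        # nodes more than 10% below mean usage count as underutilized
          "cpu": 10
          "memory": 10
        targetThresholds:  # nodes more than 10% above mean usage count as overutilized
          "cpu": 10
          "memory": 10
```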
@@ -265,10 +280,10 @@ under utilized frequently or for a short period of time. By default, `numberOfNo
 ### HighNodeUtilization
 This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
 scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
 trigger down scaling of under utilized nodes.
-This strategy **must** be used with the scheduler strategy `MostRequestedPriority`. The parameters of this strategy are
+This strategy **must** be used with the scheduler scoring strategy `MostAllocated`. The parameters of this strategy are
 configured under `nodeResourceUtilizationThresholds`.
 The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
@@ -285,6 +300,12 @@ strategy evicts pods from `underutilized nodes` (those with usage below `thresho
 so that they can be recreated in appropriately utilized nodes.
 The strategy will abort if any number of `underutilized nodes` or `appropriately utilized nodes` is zero.
+**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
+This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
+design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
+like `kubectl top`) may differ from the calculated consumption, due to these components reporting
+actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
 **Parameters:**
 |Name|Type|
@@ -399,10 +420,17 @@ pod "podA" with a toleration to tolerate a taint ``key=value:NoSchedule`` schedu
 node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations
 and will be evicted.
+Node taints can be excluded from consideration by specifying a list of excludedTaints. If a node taint key **or**
+key=value matches an excludedTaints entry, the taint will be ignored.
+For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
+excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".
 **Parameters:**
 |Name|Type|
 |---|---|
+|`excludedTaints`|list(string)|
 |`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
 |`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
@@ -417,6 +445,10 @@ kind: "DeschedulerPolicy"
 strategies:
 "RemovePodsViolatingNodeTaints":
 enabled: true
+params:
+excludedTaints:
+- dedicated=special-user # exclude taints with key "dedicated" and value "special-user"
+- reserved # exclude all taints with key "reserved"
 ````
 ### RemovePodsViolatingTopologySpreadConstraint
@@ -657,7 +689,7 @@ does not exist, descheduler won't create it and will throw an error.
 ### Label filtering
-The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta)
+The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)
 to filter pods by their labels:
 * `PodLifeTime`
@@ -705,8 +737,9 @@ The following strategies accept a `nodeFit` boolean parameter which can optimize
 If set to `true` the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently the following criteria are considered when setting `nodeFit` to `true`:
 - A `nodeSelector` on the pod
-- Any `Tolerations` on the pod and any `Taints` on the other nodes
+- Any `tolerations` on the pod and any `taints` on the other nodes
 - `nodeAffinity` on the pod
+- Resource `requests` made by the pod and the resources available on other nodes
 - Whether any of the other nodes are marked as `unschedulable`
 E.g.
@@ -716,17 +749,17 @@ apiVersion: "descheduler/v1alpha1"
 kind: "DeschedulerPolicy"
 strategies:
 "LowNodeUtilization":
 enabled: true
 params:
 nodeResourceUtilizationThresholds:
 thresholds:
-"cpu" : 20
+"cpu": 20
 "memory": 20
 "pods": 20
 targetThresholds:
-"cpu" : 50
+"cpu": 50
 "memory": 50
 "pods": 50
 nodeFit: true
 ```
@@ -761,6 +794,23 @@ Setting `--v=4` or greater on the Descheduler will log all reasons why any pod i
 Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods
 are evicted by using the eviction subresource to handle PDB.
+## High Availability
+In High Availability mode, Descheduler starts [leader election](https://github.com/kubernetes/client-go/tree/master/tools/leaderelection) process in Kubernetes. You can activate HA mode
+if you choose to deploy your application as Deployment.
+Deployment starts with 1 replica by default. If you want to use more than 1 replica, you must consider
+enable High Availability mode since we don't want to run descheduler pods simultaneously.
+### Configure HA Mode
+The leader election process can be enabled by setting `--leader-elect` in the CLI. You can also set
+`--set=leaderElection.enabled=true` flag if you are using Helm.
+To get best results from HA mode some additional configurations might require:
+* Configure a [podAntiAffinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node) rule if you want to schedule onto a node only if that node is in the same zone as at least one already-running descheduler
+* Set the replica count greater than 1
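(Editorial illustration, not part of the diff: the HA knobs introduced above map onto the Helm chart changes further down in this page. A minimal values override, with an illustrative replica count, might be:)

```yaml
kind: Deployment        # HA mode only applies to the Deployment workload
replicas: 2             # more than one replica requires leader election
leaderElection:
  enabled: true
```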
 ## Metrics
 | name | type | description |
@@ -780,17 +830,18 @@ v0.18 should work with k8s v1.18, v1.17, and v1.16.
 Starting with descheduler release v0.18 the minor version of descheduler matches the minor version of the k8s client
 packages that it is compiled with.
-Descheduler | Supported Kubernetes Version
--------------|-----------------------------
-v0.22 | v1.22
-v0.21 | v1.21
-v0.20 | v1.20
-v0.19 | v1.19
-v0.18 | v1.18
-v0.10 | v1.17
-v0.4-v0.9 | v1.9+
-v0.1-v0.3 | v1.7-v1.8
+Descheduler | Supported Kubernetes Version |
+-------------|------------------------------|
+v0.24 | v1.24 |
+v0.23 | v1.23 |
+v0.22 | v1.22 |
+v0.21 | v1.21 |
+v0.20 | v1.20 |
+v0.19 | v1.19 |
+v0.18 | v1.18 |
+v0.10 | v1.17 |
+v0.4-v0.9 | v1.9+ |
+v0.1-v0.3 | v1.7-v1.8 |
 ## Getting Involved and Contributing


@@ -1,6 +1,6 @@
 apiVersion: v1
 name: descheduler
-version: 0.23.0
+version: 0.23.1
 appVersion: 0.23.0
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:


@@ -43,30 +43,44 @@ The command removes all the Kubernetes components associated with the chart and
 The following table lists the configurable parameters of the _descheduler_ chart and their default values.
 | Parameter | Description | Default |
 | --- | --- | --- |
 | `kind` | Use as CronJob or Deployment | `CronJob` |
 | `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
 | `image.tag` | Docker tag to use | `v[chart appVersion]` |
 | `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
 | `imagePullSecrets` | Docker repository secrets | `[]` |
 | `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
 | `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
 | `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
 | `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
 | `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
 | `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
 | `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
 | `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
+| `replicas` | The replica count for Deployment | `1` |
+| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
 | `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
 | `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
 | `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
 | `rbac.create` | If `true`, create & use RBAC resources | `true` |
 | `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
 | `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
 | `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
 | `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
+| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
-| `nodeSelector` | Node selectors to run the descheduler cronjob on specific nodes | `nil` |
+| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
+| `service.enabled` | If `true`, create a service for deployment | `false` |
+| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
+| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
+| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
+| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
+| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
+| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
+| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
+| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
+| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
-| `tolerations` | tolerations to run the descheduler cronjob on specific nodes | `nil` |
+| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
 | `suspend` | Set spec.suspend in descheduler cronjob | `false` |
 | `commonLabels` | Labels to apply to all resources | `{}` |
+| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
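(Editorial illustration, not part of the diff: the metrics-scrape parameters added above come from the Helm chart changes in this commit range. A sketch of a values override that turns them on, assuming a Prometheus Operator watching the release namespace, might be:)

```yaml
kind: Deployment          # the Service/ServiceMonitor templates only render for Deployments
service:
  enabled: true
serviceMonitor:
  enabled: true
  interval: 30s           # illustrative; omit to use the Prometheus default
  insecureSkipVerify: true
```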


@@ -1 +1,7 @@
-Descheduler installed as a {{ .Values.kind }} .
+Descheduler installed as a {{ .Values.kind }}.
+{{- if eq .Values.kind "Deployment" }}
+{{- if eq .Values.replicas 1.0}}
+WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
+{{- end}}
+{{- end}}


@@ -65,3 +65,37 @@ Create the name of the service account to use
 {{ default "default" .Values.serviceAccount.name }}
 {{- end -}}
 {{- end -}}
+{{/*
+Leader Election
+*/}}
+{{- define "descheduler.leaderElection"}}
+{{- if .Values.leaderElection -}}
+- --leader-elect
+- {{ default false .Values.leaderElection.enabled }}
+{{- if .Values.leaderElection.leaseDuration }}
+- --leader-elect-lease-duration
+- {{ .Values.leaderElection.leaseDuration }}
+{{- end }}
+{{- if .Values.leaderElection.renewDeadline }}
+- --leader-elect-renew-deadline
+- {{ .Values.leaderElection.renewDeadline }}
+{{- end }}
+{{- if .Values.leaderElection.retryPeriod }}
+- --leader-elect-retry-period
+- {{ .Values.leaderElection.retryPeriod }}
+{{- end }}
+{{- if .Values.leaderElection.resourceLock }}
+- --leader-elect-resource-lock
+- {{ .Values.leaderElection.resourceLock }}
+{{- end }}
+{{- if .Values.leaderElection.resourceName }}
+- --leader-elect-resource-name
+- {{ .Values.leaderElection.resourceName }}
+{{- end }}
+{{- if .Values.leaderElection.resourceNamescape }}
+- --leader-elect-resource-namespace
+- {{ .Values.leaderElection.resourceNamescape }}
+{{- end -}}
+{{- end }}
+{{- end }}
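(Editorial note: the helper above emits each flag and its value as separate list items, and the Deployment template includes it with `nindent 12`. Assuming, for illustration, `leaderElection.enabled=true` and `leaderElection.leaseDuration=15`, the rendered container args would gain roughly:)

```yaml
            - --leader-elect
            - true
            - --leader-elect-lease-duration
            - 15
```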


@@ -14,7 +14,7 @@ rules:
 verbs: ["get", "watch", "list"]
 - apiGroups: [""]
 resources: ["namespaces"]
-verbs: ["get", "list"]
+verbs: ["get", "watch", "list"]
 - apiGroups: [""]
 resources: ["pods"]
 verbs: ["get", "watch", "list", "delete"]
@@ -24,6 +24,15 @@ rules:
 - apiGroups: ["scheduling.k8s.io"]
 resources: ["priorityclasses"]
 verbs: ["get", "watch", "list"]
+{{- if .Values.leaderElection.enabled }}
+- apiGroups: ["coordination.k8s.io"]
+resources: ["leases"]
+verbs: ["create"]
+- apiGroups: ["coordination.k8s.io"]
+resources: ["leases"]
+resourceNames: ["descheduler"]
+verbs: ["get", "patch", "delete"]
+{{- end }}
 {{- if .Values.podSecurityPolicy.create }}
 - apiGroups: ['policy']
 resources: ['podsecuritypolicies']


@@ -2,6 +2,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
 name: {{ template "descheduler.fullname" . }}
+namespace: {{ .Release.Namespace }}
 labels:
 {{- include "descheduler.labels" . | nindent 4 }}
 data:


@@ -3,6 +3,7 @@ apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
 kind: CronJob
 metadata:
 name: {{ template "descheduler.fullname" . }}
+namespace: {{ .Release.Namespace }}
 labels:
 {{- include "descheduler.labels" . | nindent 4 }}
 spec:


@@ -3,10 +3,18 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
 name: {{ template "descheduler.fullname" . }}
+namespace: {{ .Release.Namespace }}
 labels:
 {{- include "descheduler.labels" . | nindent 4 }}
 spec:
+{{- if gt .Values.replicas 1.0}}
+{{- if not .Values.leaderElection.enabled }}
+{{- fail "You must set leaderElection to use more than 1 replica"}}
+{{- end}}
+replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
+{{- else }}
 replicas: 1
+{{- end }}
 selector:
 matchLabels:
 {{- include "descheduler.selectorLabels" . | nindent 6 }}
@@ -27,6 +35,10 @@ spec:
 priorityClassName: {{ .Values.priorityClassName }}
 {{- end }}
 serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
+{{- with .Values.imagePullSecrets }}
+imagePullSecrets:
+{{- toYaml . | nindent 10 }}
+{{- end }}
 containers:
 - name: {{ .Chart.Name }}
 image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
@@ -44,6 +56,7 @@ spec:
 - {{ $value | quote }}
 {{- end }}
 {{- end }}
+{{- include "descheduler.leaderElection" . | nindent 12 }}
 ports:
 - containerPort: 10258
 protocol: TCP


@@ -3,6 +3,7 @@ apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
 name: {{ template "descheduler.fullname" . }}
+namespace: {{ .Release.Namespace }}
 annotations:
 seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
 seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'


@@ -0,0 +1,21 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.service.enabled true }}
apiVersion: v1
kind: Service
metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
clusterIP: None
ports:
- name: http-metrics
port: 10258
protocol: TCP
targetPort: 10258
selector:
{{- include "descheduler.selectorLabels" . | nindent 4 }}
type: ClusterIP
{{- end }}
{{- end }}


@@ -3,6 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
 name: {{ template "descheduler.serviceAccountName" . }}
+namespace: {{ .Release.Namespace }}
 labels:
 {{- include "descheduler.labels" . | nindent 4 }}
 {{- if .Values.serviceAccount.annotations }}


@@ -0,0 +1,41 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.serviceMonitor.enabled true }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "descheduler.fullname" . }}-servicemonitor
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
endpoints:
- honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
port: http-metrics
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
scheme: https
tlsConfig:
{{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
insecureSkipVerify: true
{{- end }}
{{- if .Values.serviceMonitor.serverName }}
serverName: {{ .Values.serviceMonitor.serverName }}
{{- end}}
{{- if .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
{{- end }}
{{- end }}
{{- end }}


@@ -2,6 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
 name: descheduler-test-pod
+namespace: {{ .Release.Namespace }}
 annotations:
 "helm.sh/hook": test
 spec:
@@ -26,4 +27,4 @@ spec:
 curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl &&
 chmod +x ./kubectl &&
 mv ./kubectl /usr/local/bin/kubectl &&
-/usr/local/bin/kubectl get pods --namespace kube-system --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | grep "descheduler" | grep "Completed"
+/usr/local/bin/kubectl get pods --namespace {{ .Release.Namespace }} --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | grep "descheduler" | grep "Completed"


@@ -11,7 +11,8 @@ image:
 tag: ""
 pullPolicy: IfNotPresent
-imagePullSecrets: []
+imagePullSecrets:
+# - name: container-registry-secret
 resources:
 requests:
@@ -37,13 +38,32 @@ suspend: false
 # Required when running as a Deployment
 deschedulingInterval: 5m
+# Specifies the replica count for Deployment
+# Set leaderElection if you want to use more than 1 replica
+# Set affinity.podAntiAffinity rule if you want to schedule onto a node
+# only if that node is in the same zone as at least one already-running descheduler
+replicas: 1
+# Specifies whether Leader Election resources should be created
+# Required when running as a Deployment
+leaderElection: {}
+# enabled: true
+# leaseDuration: 15
+# renewDeadline: 10
+# retryPeriod: 2
+# resourceLock: "leases"
+# resourceName: "descheduler"
+# resourceNamescape: "kube-system"
 cmdOptions:
 v: 3
-# evict-local-storage-pods:
-# max-pods-to-evict-per-node: 10
-# node-selector: "key1=value1,key2=value2"
 deschedulerPolicy:
+# nodeSelector: "key1=value1,key2=value2"
+# maxNoOfPodsToEvictPerNode: 10
+# maxNoOfPodsToEvictPerNamespace: 10
+# ignorePvcPods: true
+# evictLocalStoragePods: true
 strategies:
 RemoveDuplicates:
 enabled: true
@@ -84,7 +104,15 @@ affinity: {}
 # values:
 # - e2e-az1
 # - e2e-az2
+# podAntiAffinity:
+# requiredDuringSchedulingIgnoredDuringExecution:
+# - labelSelector:
+# matchExpressions:
+# - key: app.kubernetes.io/name
+# operator: In
+# values:
+# - descheduler
+# topologyKey: "kubernetes.io/hostname"
 tolerations: []
 # - key: 'management'
 # operator: 'Equal'
@@ -117,3 +145,25 @@ livenessProbe:
 initialDelaySeconds: 3
 periodSeconds: 10
+service:
+enabled: false
+serviceMonitor:
+enabled: false
+# The namespace where Prometheus expects to find service monitors.
+# namespace: ""
+interval: ""
+# honorLabels: true
+insecureSkipVerify: true
+serverName: null
+metricRelabelings: []
+# - action: keep
+# regex: 'descheduler_(build_info|pods_evicted)'
+# sourceLabels: [__name__]
+relabelings: []
+# - sourceLabels: [__meta_kubernetes_pod_node_name]
+# separator: ;
+# regex: ^(.*)$
+# targetLabel: nodename
+# replacement: $1
+# action: replace


@@ -19,13 +19,15 @@ package options
 import (
 "github.com/spf13/pflag"
+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 apiserveroptions "k8s.io/apiserver/pkg/server/options"
 clientset "k8s.io/client-go/kubernetes"
+componentbaseconfig "k8s.io/component-base/config"
+componentbaseoptions "k8s.io/component-base/config/options"
 "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
 "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
 deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
+"time"
 )
 const (
@@ -58,7 +60,17 @@ func NewDeschedulerServer() (*DeschedulerServer, error) {
 }
 func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
-versionedCfg := v1alpha1.DeschedulerConfiguration{}
+versionedCfg := v1alpha1.DeschedulerConfiguration{
+LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+LeaderElect: false,
+LeaseDuration: metav1.Duration{Duration: 137 * time.Second},
+RenewDeadline: metav1.Duration{Duration: 107 * time.Second},
+RetryPeriod: metav1.Duration{Duration: 26 * time.Second},
+ResourceLock: "leases",
+ResourceName: "descheduler",
+ResourceNamespace: "kube-system",
+},
+}
 deschedulerscheme.Scheme.Default(&versionedCfg)
 cfg := componentconfig.DeschedulerConfiguration{}
 if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
@@ -76,5 +88,7 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
 fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
 fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
+componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
 rs.SecureServing.AddFlags(fs)
 }


@@ -67,13 +67,13 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
 if factory == nil {
 klog.ClearLogger()
 } else {
-log, logrFlush := factory.Create(config.FormatOptions{})
+log, logrFlush := factory.Create(config.LoggingConfiguration{})
 defer logrFlush()
 klog.SetLogger(log)
 }
 ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
-defer done()
 pathRecorderMux := mux.NewPathRecorderMux("descheduler")
 if !s.DisableMetrics {
 pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
@@ -81,15 +81,20 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
 healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
-if _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done()); err != nil {
+stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
+if err != nil {
 klog.Fatalf("failed to start secure server: %v", err)
 return
 }
-err := Run(ctx, s)
+err = Run(ctx, s)
 if err != nil {
 klog.ErrorS(err, "descheduler server")
 }
+done()
+// wait for metrics server to close
+<-stoppedCh
 },
 }
 cmd.SetOut(out)

docs/proposals.md (new file)

@@ -0,0 +1,16 @@
# Proposals
This document walk you through about all the enhancements proposals for descheduler.
## Descheduler v1alpha2 Design Proposal
```yaml
title: Descheduler v1alpha2 Design Proposal
authors:
- "@damemi"
link:
- https://docs.google.com/document/d/1S1JCh-0F-QCJvBBG-kbmXiHAJFF8doArhDIAKbOj93I/edit#heading=h.imbp1ctnc8lx
- https://github.com/kubernetes-sigs/descheduler/issues/679
owning-sig: sig-scheduling
creation-date: 2021-05-01
status: implementable
```


@@ -2,14 +2,16 @@
 Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.
-Descheduler Version | Container Image | Architectures |
--------------------- |--------------------------------------------|-------------------------|
-v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
-v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |
-v0.18.0 | k8s.gcr.io/descheduler/descheduler:v0.18.0 | AMD64 |
-v0.10.0 | k8s.gcr.io/descheduler/descheduler:v0.10.0 | AMD64 |
+Descheduler Version | Container Image | Architectures |
+------------------- |--------------------------------------------|-------------------------|
+v0.24.0 | k8s.gcr.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.23.1 | k8s.gcr.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
+v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |
+v0.18.0 | k8s.gcr.io/descheduler/descheduler:v0.18.0 | AMD64 |
+v0.10.0 | k8s.gcr.io/descheduler/descheduler:v0.10.0 | AMD64 |
 Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
 starting with descheduler release v0.20.0 use the below process to download the official descheduler
@@ -34,31 +36,52 @@ Usage:
 descheduler [command]
 Available Commands:
+completion generate the autocompletion script for the specified shell
 help Help about any command
 version Version of descheduler
 Flags:
-      --add-dir-header If true, adds the file directory to the header of the log messages
-      --alsologtostderr log to standard error as well as files
-      --descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
-      --dry-run execute descheduler in dry run mode.
-      --evict-local-storage-pods DEPRECATED: enables evicting pods using local storage by descheduler
-  -h, --help help for descheduler
-      --kubeconfig string File with kube configuration.
-      --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0)
-      --log-dir string If non-empty, write log files in this directory
-      --log-file string If non-empty, use this log file
-      --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
-      --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
-      --logtostderr log to standard error instead of files (default true)
-      --max-pods-to-evict-per-node int DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler
-      --node-selector string DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
-      --policy-config-file string File with descheduler policy configuration.
-      --skip-headers If true, avoid header prefixes in the log messages
-      --skip-log-headers If true, avoid headers when opening log files
-      --stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-  -v, --v Level number for the log level verbosity
-      --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+      --add-dir-header If true, adds the file directory to the header of the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --alsologtostderr log to standard error as well as files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
+      --cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
+      --descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
+      --disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
+      --dry-run execute descheduler in dry run mode.
+  -h, --help help for descheduler
+      --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
+      --kubeconfig string File with kube configuration.
+      --leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
+      --leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 15s)
+      --leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled. (default 10s)
+      --leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
+      --leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
+      --leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
+      --leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 2s)
+      --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --log-dir string If non-empty, write log files in this directory (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --log-file string If non-empty, use this log file (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
+      --logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
+      --logtostderr log to standard error instead of files (default true) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --one-output If true, only write logs to their native severity level (vs also writing to each lower severity level) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
+      --permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
+      --permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
+      --policy-config-file string File with descheduler policy configuration.
+      --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
+      --skip-headers If true, avoid header prefixes in the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--skip-log-headers If true, avoid headers when opening log files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--stderrthreshold severity logs at or above this threshold go to stderr (default 2) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
Use "descheduler [command] --help" for more information about a command. Use "descheduler [command] --help" for more information about a command.
``` ```
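For a highly available setup, the leader election flags above are typically passed as container arguments of the descheduler Deployment. The fragment below is only an illustrative sketch: the names, namespace, service account, image path/tag, and the ConfigMap behind `/policy-dir/policy.yaml` are assumptions and not part of this diff (RBAC and volume mounts are omitted for brevity).

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: descheduler
  namespace: kube-system
spec:
  replicas: 2                                   # more than one replica is only useful with --leader-elect
  selector:
    matchLabels:
      app: descheduler
  template:
    metadata:
      labels:
        app: descheduler
    spec:
      serviceAccountName: descheduler-sa        # assumed service account with the usual descheduler RBAC
      containers:
      - name: descheduler
        image: k8s.gcr.io/descheduler/descheduler:v0.24.0   # image path/tag assumed
        command:
        - /bin/descheduler
        args:
        - --policy-config-file=/policy-dir/policy.yaml
        - --descheduling-interval=5m
        - --leader-elect                        # only the elected leader runs the descheduling loop
        - --leader-elect-resource-namespace=kube-system
```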
@@ -89,7 +112,8 @@ strategies:
"PodLifeTime": "PodLifeTime":
enabled: true enabled: true
params: params:
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days podLifeTime:
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
``` ```
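For context, the snippet above is only a fragment of a policy file; under the v1alpha1 policy API it would be embedded roughly as follows (a sketch, not part of this diff):

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
```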
### Balance Cluster By Node Memory Utilization
@@ -117,7 +141,7 @@ strategies:
#### Balance low utilization nodes
Using `HighNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
from nodes with memory utilization lower than 20%. This should be used with the `NodeResourcesFit` scheduler plugin's `MostAllocated` scoring strategy; see the [scheduling plugins docs](https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins).
The evicted pods will be compacted into a minimal set of nodes.
```
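# Sketch of the policy this section refers to; the nodeResourceUtilizationThresholds
# field names are assumed from the nodeutilization strategy documentation and are
# not part of this diff.
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "HighNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "memory": 20
```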
@@ -136,7 +160,14 @@ strategies:
Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
Nodes which have problems. Node Problem Detector can detect specific Node problems and report them to the API server.
The node controller's `TaintNodesByCondition` feature then turns some of these conditions into taints. Currently, this only works for the default node conditions: PIDPressure, MemoryPressure, DiskPressure, Ready, and some cloud provider specific conditions.
The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate
and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.
---
**NOTE**
Once [kubernetes/node-problem-detector#565](https://github.com/kubernetes/node-problem-detector/pull/565) is available in NPD, we need to update this section.
---
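To wire this into a descheduler run, a minimal policy enabling the strategy described above could look like the following sketch (combine it with your other strategies and taint-related parameters as needed):

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
```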
go.mod
@@ -4,16 +4,16 @@ go 1.17
require ( require (
github.com/client9/misspell v0.3.4 github.com/client9/misspell v0.3.4
github.com/spf13/cobra v1.2.1 github.com/spf13/cobra v1.4.0
github.com/spf13/pflag v1.0.5 github.com/spf13/pflag v1.0.5
k8s.io/api v0.23.0 k8s.io/api v0.24.0
k8s.io/apimachinery v0.23.0 k8s.io/apimachinery v0.24.0
k8s.io/apiserver v0.23.0 k8s.io/apiserver v0.24.0
k8s.io/client-go v0.23.0 k8s.io/client-go v0.24.0
k8s.io/code-generator v0.23.0 k8s.io/code-generator v0.24.0
k8s.io/component-base v0.23.0 k8s.io/component-base v0.24.0
k8s.io/component-helpers v0.23.0 k8s.io/component-helpers v0.24.0
k8s.io/klog/v2 v2.30.0 k8s.io/klog/v2 v2.60.1
k8s.io/kubectl v0.20.5 k8s.io/kubectl v0.20.5
sigs.k8s.io/mdtoc v1.0.1 sigs.k8s.io/mdtoc v1.0.1
) )
@@ -31,8 +31,8 @@ require (
github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
@@ -50,10 +50,10 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect github.com/golang/protobuf v1.5.2 // indirect
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.5 // indirect github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/imdario/mergo v0.3.5 // indirect github.com/imdario/mergo v0.3.5 // indirect
@@ -67,13 +67,13 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.28.0 // indirect github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.6.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect
go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.etcd.io/etcd/api/v3 v3.5.1 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.1 // indirect
go.etcd.io/etcd/client/v3 v3.5.0 // indirect go.etcd.io/etcd/client/v3 v3.5.1 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect go.opentelemetry.io/contrib v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
@@ -88,30 +88,30 @@ require (
go.uber.org/atomic v1.7.0 // indirect go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
golang.org/x/mod v0.4.2 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
google.golang.org/grpc v1.40.0 // indirect google.golang.org/grpc v1.40.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect sigs.k8s.io/yaml v1.2.0 // indirect
) )
go.sum
@@ -39,7 +39,6 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -81,6 +80,7 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -91,16 +91,18 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -125,6 +127,7 @@ github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzA
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -246,6 +249,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -282,9 +287,6 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
@@ -359,7 +361,6 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
@@ -380,12 +381,11 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4= github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs= github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -419,7 +419,6 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -434,8 +433,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -446,21 +446,23 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -481,19 +483,16 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -516,18 +515,21 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= go.etcd.io/etcd/api/v3 v3.5.1 h1:v28cktvBq+7vGyJXF8G+rWJmj+1XUmMtqcLnH8hDocM=
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.1 h1:XIQcHCFSG53bJETYeRJtIxdLv2EWRGxcfzR8lSnTH4E=
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs= go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
go.etcd.io/etcd/client/v3 v3.5.1 h1:oImGuV5LGKjCqXdjkMHCyWa5OO1gYKCnC/1sgdfj1Uk=
go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q=
go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk= go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk=
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw= go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw=
@@ -588,8 +590,9 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -625,8 +628,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -671,9 +675,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -685,10 +690,9 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -742,7 +746,6 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -763,12 +766,14 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e h1:XMgFehsDnnLGtjvjOfqWSUzt0alpTR1RSEuznObga2c= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -784,8 +789,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -845,8 +850,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717 h1:hI3jKY4Hpf63ns040onEbB3dAkR/H/P83hw1TG8dD3Y=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -873,7 +878,6 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -925,8 +929,8 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -975,7 +979,6 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
@@ -1007,59 +1010,59 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.20.5/go.mod h1:FQjAceXnVaWDeov2YUWhOb6Yt+5UjErkp6UO3nczO1Y= k8s.io/api v0.20.5/go.mod h1:FQjAceXnVaWDeov2YUWhOb6Yt+5UjErkp6UO3nczO1Y=
k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.24.0 h1:J0hann2hfxWr1hinZIDefw7Q96wmCBx6SSB8IY0MdDg=
k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I=
k8s.io/apimachinery v0.20.5/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.5/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.23.0 h1:mIfWRMjBuMdolAWJ3Fd+aPTMv3X9z+waiARMpvvb0HQ= k8s.io/apimachinery v0.24.0 h1:ydFCyC/DjCvFCHK5OPMKBlxayQytB8pxy8YQInd5UyQ=
k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apiserver v0.23.0 h1:Ds/QveXWi9aJ8ISB0CJa4zBNc5njxAs5u3rmMIexqCY= k8s.io/apiserver v0.24.0 h1:GR7kGsjOMfilRvlG3Stxv/3uz/ryvJ/aZXc5pqdsNV0=
k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= k8s.io/apiserver v0.24.0/go.mod h1:WFx2yiOMawnogNToVvUYT9nn1jaIkMKj41ZYCVycsBA=
k8s.io/cli-runtime v0.20.5/go.mod h1:ihjPeQWDk7NGVIkNEvpwxA3gJvqtU+LtkDj11TvyXn4= k8s.io/cli-runtime v0.20.5/go.mod h1:ihjPeQWDk7NGVIkNEvpwxA3gJvqtU+LtkDj11TvyXn4=
k8s.io/client-go v0.20.5/go.mod h1:Ee5OOMMYvlH8FCZhDsacjMlCBwetbGZETwo1OA+e6Zw= k8s.io/client-go v0.20.5/go.mod h1:Ee5OOMMYvlH8FCZhDsacjMlCBwetbGZETwo1OA+e6Zw=
k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY= k8s.io/client-go v0.24.0 h1:lbE4aB1gTHvYFSwm6eD3OF14NhFDKCejlnsGYlSJe5U=
k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw=
k8s.io/code-generator v0.20.5/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.20.5/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/code-generator v0.23.0 h1:lhyd2KJVCEmpjaCpuoooGs+e3xhPwpYvupnNRidO0Ds= k8s.io/code-generator v0.24.0 h1:7v52LjqCntfGxV9x8c57gkhDqkMHd0Z2jfRqGr6it6g=
k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.20.5/go.mod h1:l0isoBLGyQKwRoTWbPHR6jNDd3/VqQD43cNlsjddGng= k8s.io/component-base v0.20.5/go.mod h1:l0isoBLGyQKwRoTWbPHR6jNDd3/VqQD43cNlsjddGng=
k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8= k8s.io/component-base v0.24.0 h1:h5jieHZQoHrY/lHG+HyrSbJeyfuitheBvqvKwKHVC0g=
k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= k8s.io/component-base v0.24.0/go.mod h1:Dgazgon0i7KYUsS8krG8muGiMVtUZxG037l1MKyXgrA=
k8s.io/component-helpers v0.20.5/go.mod h1:AzTdoPj6YAN2SUfhBX/FUUU3ntfFuse03q/VMLovEsE= k8s.io/component-helpers v0.20.5/go.mod h1:AzTdoPj6YAN2SUfhBX/FUUU3ntfFuse03q/VMLovEsE=
k8s.io/component-helpers v0.23.0 h1:qNbqN10QTefiWcCOPkHL/0nn81sdKVv6ZgEXcSyot/U= k8s.io/component-helpers v0.24.0 h1:hZIHGfdd55thhqd9oxjDTw68OAPauDMJ+8hC69aNw1I=
k8s.io/component-helpers v0.23.0/go.mod h1:liXMh6FZS4qamKtMJQ7uLHnFe3tlC86RX5mJEk/aerg= k8s.io/component-helpers v0.24.0/go.mod h1:Q2SlLm4h6g6lPTC9GMMfzdywfLSvJT2f1hOnnjaWD8c=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
k8s.io/kubectl v0.20.5 h1:/wndy8hw5TsL8G8KWPDJrtPKS8D34uSdWS0BMRmtzWs= k8s.io/kubectl v0.20.5 h1:/wndy8hw5TsL8G8KWPDJrtPKS8D34uSdWS0BMRmtzWs=
k8s.io/kubectl v0.20.5/go.mod h1:mlNQgyV18D4XFt5BmfSkrxQNS+arT2pXDQxxnH5lMiw= k8s.io/kubectl v0.20.5/go.mod h1:mlNQgyV18D4XFt5BmfSkrxQNS+arT2pXDQxxnH5lMiw=
k8s.io/metrics v0.20.5/go.mod h1:vsptOayjKWKWHvWR1vFQY++vxydzaEo/2+JC7kSDKPU= k8s.io/metrics v0.20.5/go.mod h1:vsptOayjKWKWHvWR1vFQY++vxydzaEo/2+JC7kSDKPU=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 h1:DEQ12ZRxJjsglk5JIi5bLgpKaHihGervKmg5uryaEHw= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 h1:dUk62HQ3ZFhD48Qr8MIXCiKA8wInBQCtuE4QGfFW7yA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/mdtoc v1.0.1 h1:6ECKhQnbetwZBR6R2IeT2LH+1w+2Zsip0iXjikgaXIk= sigs.k8s.io/mdtoc v1.0.1 h1:6ECKhQnbetwZBR6R2IeT2LH+1w+2Zsip0iXjikgaXIk=
sigs.k8s.io/mdtoc v1.0.1/go.mod h1:COYBtOjsaCg7o7SC4eaLwEXPuVRSuiVuLLRrHd7kShw= sigs.k8s.io/mdtoc v1.0.1/go.mod h1:COYBtOjsaCg7o7SC4eaLwEXPuVRSuiVuLLRrHd7kShw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=


@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
 GO_VERSION=($(go version))
-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17|go1.18') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi


@@ -12,7 +12,7 @@ rules:
   verbs: ["get", "watch", "list"]
 - apiGroups: [""]
   resources: ["namespaces"]
-  verbs: ["get", "list"]
+  verbs: ["get", "watch", "list"]
 - apiGroups: [""]
   resources: ["pods"]
   verbs: ["get", "watch", "list", "delete"]
@@ -22,6 +22,13 @@ rules:
 - apiGroups: ["scheduling.k8s.io"]
   resources: ["priorityclasses"]
   verbs: ["get", "watch", "list"]
+- apiGroups: ["coordination.k8s.io"]
+  resources: ["leases"]
+  verbs: ["create"]
+- apiGroups: ["coordination.k8s.io"]
+  resources: ["leases"]
+  resourceNames: ["descheduler"]
+  verbs: ["get", "patch", "delete"]
 ---
 apiVersion: v1
 kind: ServiceAccount
@@ -41,4 +48,3 @@ subjects:
 - name: descheduler-sa
   kind: ServiceAccount
   namespace: kube-system


@@ -16,7 +16,7 @@ spec:
           priorityClassName: system-cluster-critical
           containers:
             - name: descheduler
-              image: k8s.gcr.io/descheduler/descheduler:v0.22.0
+              image: k8s.gcr.io/descheduler/descheduler:v0.24.0
               volumeMounts:
                 - mountPath: /policy-dir
                   name: policy-volume


@@ -19,7 +19,7 @@ spec:
       serviceAccountName: descheduler-sa
       containers:
         - name: descheduler
-          image: k8s.gcr.io/descheduler/descheduler:v0.22.0
+          image: k8s.gcr.io/descheduler/descheduler:v0.24.0
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/descheduler"


@@ -14,7 +14,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: descheduler
-          image: k8s.gcr.io/descheduler/descheduler:v0.22.0
+          image: k8s.gcr.io/descheduler/descheduler:v0.24.0
           volumeMounts:
             - mountPath: /policy-dir
               name: policy-volume


@@ -88,15 +88,18 @@ type StrategyParameters struct {
     ThresholdPriorityClassName string
     LabelSelector              *metav1.LabelSelector
     NodeFit                    bool
+    IncludePreferNoSchedule    bool
+    ExcludedTaints             []string
 }
 
 type Percentage float64
 type ResourceThresholds map[v1.ResourceName]Percentage
 
 type NodeResourceUtilizationThresholds struct {
-    Thresholds       ResourceThresholds
-    TargetThresholds ResourceThresholds
-    NumberOfNodes    int
+    UseDeviationThresholds bool
+    Thresholds             ResourceThresholds
+    TargetThresholds       ResourceThresholds
+    NumberOfNodes          int
 }
 
 type PodsHavingTooManyRestarts struct {


@@ -86,15 +86,18 @@ type StrategyParameters struct {
     ThresholdPriorityClassName string                `json:"thresholdPriorityClassName"`
     LabelSelector              *metav1.LabelSelector `json:"labelSelector"`
     NodeFit                    bool                  `json:"nodeFit"`
+    IncludePreferNoSchedule    bool                  `json:"includePreferNoSchedule"`
+    ExcludedTaints             []string              `json:"excludedTaints,omitempty"`
 }
 
 type Percentage float64
 type ResourceThresholds map[v1.ResourceName]Percentage
 
 type NodeResourceUtilizationThresholds struct {
-    Thresholds       ResourceThresholds `json:"thresholds,omitempty"`
-    TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
-    NumberOfNodes    int                `json:"numberOfNodes,omitempty"`
+    UseDeviationThresholds bool               `json:"useDeviationThresholds,omitempty"`
+    Thresholds             ResourceThresholds `json:"thresholds,omitempty"`
+    TargetThresholds       ResourceThresholds `json:"targetThresholds,omitempty"`
+    NumberOfNodes          int                `json:"numberOfNodes,omitempty"`
 }
 
 type PodsHavingTooManyRestarts struct {
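The new useDeviationThresholds field surfaces in the v1alpha1 policy file under nodeResourceUtilizationThresholds. A minimal sketch of a LowNodeUtilization configuration that opts into it could look like the following (the threshold numbers are illustrative only; as the field name suggests, the percentages are then treated as allowed deviations around the mean node utilization rather than as absolute bounds):

apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        useDeviationThresholds: true
        thresholds:
          "cpu": 10
          "memory": 10
          "pods": 10
        targetThresholds:
          "cpu": 20
          "memory": 20
          "pods": 20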


@@ -261,6 +261,7 @@ func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Name
 }
 
 func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
+    out.UseDeviationThresholds = in.UseDeviationThresholds
     out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
     out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
     out.NumberOfNodes = in.NumberOfNodes
@@ -273,6 +274,7 @@ func Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtili
 }
 
 func autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
+    out.UseDeviationThresholds = in.UseDeviationThresholds
     out.Thresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
     out.TargetThresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
     out.NumberOfNodes = in.NumberOfNodes
@@ -361,6 +363,8 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
     out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
     out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
     out.NodeFit = in.NodeFit
+    out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
+    out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
     return nil
 }
@@ -382,6 +386,8 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
     out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
     out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
     out.NodeFit = in.NodeFit
+    out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
+    out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
     return nil
 }
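The excludedTaints and includePreferNoSchedule values converted above are ordinary strategy parameters, so they are set per strategy in the policy file. A rough sketch for RemovePodsViolatingNodeTaints (the taint names are illustrative only):

apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
    params:
      includePreferNoSchedule: true
      excludedTaints:
        - dedicated=special-user   # excludes taints with key "dedicated" and value "special-user"
        - reserved                 # excludes all taints with key "reserved"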


@@ -356,6 +356,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
         *out = new(v1.LabelSelector)
         (*in).DeepCopyInto(*out)
     }
+    if in.ExcludedTaints != nil {
+        in, out := &in.ExcludedTaints, &out.ExcludedTaints
+        *out = make([]string, len(*in))
+        copy(*out, *in)
+    }
     return
 }


@@ -356,6 +356,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
         *out = new(v1.LabelSelector)
         (*in).DeepCopyInto(*out)
     }
+    if in.ExcludedTaints != nil {
+        in, out := &in.ExcludedTaints, &out.ExcludedTaints
+        *out = make([]string, len(*in))
+        copy(*out, *in)
+    }
     return
 }


@@ -53,6 +53,9 @@ type DeschedulerConfiguration struct {
     // IgnorePVCPods sets whether PVC pods should be allowed to be evicted
     IgnorePVCPods bool
 
+    // LeaderElection starts Deployment using leader election loop
+    LeaderElection componentbaseconfig.LeaderElectionConfiguration
+
     // Logging specifies the options of logging.
     // Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
     Logging componentbaseconfig.LoggingConfiguration


@@ -54,6 +54,9 @@ type DeschedulerConfiguration struct {
     // IgnorePVCPods sets whether PVC pods should be allowed to be evicted
     IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
 
+    // LeaderElection starts Deployment using leader election loop
+    LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
+
     // Logging specifies the options of logging.
     // Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
     Logging componentbaseconfig.LoggingConfiguration `json:"logging,omitempty"`
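Because LeaderElection is embedded as a standard componentbaseconfig.LeaderElectionConfiguration, it is presumably driven by the usual component-base leader-election options on the descheduler binary, and the Run() changes further below also require a non-zero descheduling interval when leader election is enabled. A hedged sketch of the relevant Deployment container spec (the --leader-elect flag name is assumed from the standard component-base flag set, not confirmed by this diff):

    containers:
      - name: descheduler
        image: k8s.gcr.io/descheduler/descheduler:v0.24.0
        command:
          - "/bin/descheduler"
        args:
          - "--policy-config-file=/policy-dir/policy.yaml"
          - "--descheduling-interval=5m"
          - "--leader-elect=true"   # assumed flag, bound from componentbaseconfig.LeaderElectionConfiguration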


@@ -58,6 +58,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
     out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
     out.EvictLocalStoragePods = in.EvictLocalStoragePods
     out.IgnorePVCPods = in.IgnorePVCPods
+    out.LeaderElection = in.LeaderElection
     out.Logging = in.Logging
     return nil
 }
@@ -76,6 +77,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
     out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
     out.EvictLocalStoragePods = in.EvictLocalStoragePods
     out.IgnorePVCPods = in.IgnorePVCPods
+    out.LeaderElection = in.LeaderElection
     out.Logging = in.Logging
     return nil
 }


@@ -29,6 +29,7 @@ import (
 func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
     *out = *in
     out.TypeMeta = in.TypeMeta
+    out.LeaderElection = in.LeaderElection
     in.Logging.DeepCopyInto(&out.Logging)
     return
 }


@@ -29,6 +29,7 @@ import (
 func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
     *out = *in
     out.TypeMeta = in.TypeMeta
+    out.LeaderElection = in.LeaderElection
     in.Logging.DeepCopyInto(&out.Logging)
     return
 }


@@ -69,13 +69,22 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
         return err
     }
 
-    // tie in root ctx with our wait stopChannel
-    stopChannel := make(chan struct{})
-    go func() {
-        <-ctx.Done()
-        close(stopChannel)
-    }()
-    return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
+    runFn := func() error {
+        return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
+    }
+
+    if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
+        return fmt.Errorf("leaderElection must be used with deschedulingInterval")
+    }
+
+    if rs.LeaderElection.LeaderElect && !rs.DryRun {
+        if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
+            return fmt.Errorf("leaderElection: %w", err)
+        }
+        return nil
+    }
+
+    return runFn()
 }
 
 type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
@@ -156,13 +165,16 @@ func cachedClient(
     return fakeClient, nil
 }
 
-func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
+func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
     sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
     nodeInformer := sharedInformerFactory.Core().V1().Nodes()
     podInformer := sharedInformerFactory.Core().V1().Pods()
     namespaceInformer := sharedInformerFactory.Core().V1().Namespaces()
     priorityClassInformer := sharedInformerFactory.Scheduling().V1().PriorityClasses()
 
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+
     // create the informers
     namespaceInformer.Informer()
     priorityClassInformer.Informer()
@@ -172,8 +184,8 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
         return fmt.Errorf("build get pods assigned to node function error: %v", err)
     }
 
-    sharedInformerFactory.Start(stopChannel)
-    sharedInformerFactory.WaitForCacheSync(stopChannel)
+    sharedInformerFactory.Start(ctx.Done())
+    sharedInformerFactory.WaitForCacheSync(ctx.Done())
 
     strategyFuncs := map[api.StrategyName]strategyFunction{
         "RemoveDuplicates": strategies.RemoveDuplicatePods,
@@ -223,13 +235,13 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
         nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
         if err != nil {
             klog.V(1).InfoS("Unable to get ready nodes", "err", err)
-            close(stopChannel)
+            cancel()
             return
         }
 
         if len(nodes) <= 1 {
             klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
-            close(stopChannel)
+            cancel()
             return
         }
@@ -271,6 +283,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
             deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
             deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
             nodes,
+            getPodsAssignedToNode,
             evictLocalStoragePods,
             evictSystemCriticalPods,
             ignorePvcPods,
@@ -292,9 +305,9 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
         // If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
         if rs.DeschedulingInterval.Seconds() == 0 {
-            close(stopChannel)
+            cancel()
         }
-    }, rs.DeschedulingInterval, stopChannel)
+    }, rs.DeschedulingInterval, ctx.Done())
 
     return nil
 }


@@ -35,9 +35,6 @@ func TestTaintsUpdated(t *testing.T) {
         },
     }
 
-    stopChannel := make(chan struct{})
-    defer close(stopChannel)
-
     rs, err := options.NewDeschedulerServer()
     if err != nil {
         t.Fatalf("Unable to initialize server: %v", err)
@@ -47,7 +44,7 @@ func TestTaintsUpdated(t *testing.T) {
     errChan := make(chan error, 1)
     defer close(errChan)
     go func() {
-        err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1", stopChannel)
+        err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1")
         errChan <- err
     }()
     select {
@@ -101,3 +98,69 @@ func TestTaintsUpdated(t *testing.T) {
         t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies")
     }
 }
func TestRootCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
client := fakeclientset.NewSimpleClientset(n1, n2)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{}, // no strategies needed for this test
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.DeschedulingInterval = 100 * time.Millisecond
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1")
errChan <- err
}()
cancel()
select {
case err := <-errChan:
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
t.Fatal("Root ctx should have canceled immediately")
}
}
func TestRootCancelWithNoInterval(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
client := fakeclientset.NewSimpleClientset(n1, n2)
dp := &api.DeschedulerPolicy{
Strategies: api.StrategyList{}, // no strategies needed for this test
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.DeschedulingInterval = 0
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1")
errChan <- err
}()
cancel()
select {
case err := <-errChan:
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
t.Fatal("Root ctx should have canceled immediately")
}
}


@@ -51,6 +51,7 @@ type namespacePodEvictCount map[string]uint
 type PodEvictor struct {
     client                     clientset.Interface
     nodes                      []*v1.Node
+    nodeIndexer                podutil.GetPodsAssignedToNodeFunc
     policyGroupVersion         string
     dryRun                     bool
     maxPodsToEvictPerNode      *uint
@@ -71,6 +72,7 @@ func NewPodEvictor(
     maxPodsToEvictPerNode *uint,
     maxPodsToEvictPerNamespace *uint,
     nodes []*v1.Node,
+    nodeIndexer podutil.GetPodsAssignedToNodeFunc,
     evictLocalStoragePods bool,
     evictSystemCriticalPods bool,
     ignorePvcPods bool,
@@ -87,6 +89,7 @@ func NewPodEvictor(
     return &PodEvictor{
         client:                     client,
         nodes:                      nodes,
+        nodeIndexer:                nodeIndexer,
         policyGroupVersion:         policyGroupVersion,
         dryRun:                     dryRun,
         maxPodsToEvictPerNode:      maxPodsToEvictPerNode,
@@ -155,9 +158,9 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
     }
 
     if pe.dryRun {
-        klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
+        klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", node.Name)
     } else {
-        klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason)
+        klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", node.Name)
         eventBroadcaster := record.NewBroadcaster()
         eventBroadcaster.StartStructuredLogging(3)
         eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
@@ -296,7 +299,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
     }
     if options.nodeFit {
         ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
-            if !nodeutil.PodFitsAnyOtherNode(pod, pe.nodes) {
+            if !nodeutil.PodFitsAnyOtherNode(pe.nodeIndexer, pod, pe.nodes) {
                 return fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable")
             }
             return nil
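With the PodEvictor now carrying a nodeIndexer, the nodeFit constraint above appears to resolve the pods already assigned to each candidate node through the shared pod informer index rather than by listing them directly. From the policy side nothing changes: nodeFit stays an ordinary per-strategy parameter, for example (a sketch; the strategy choice is illustrative):

apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeFit: true
      nodeAffinityType:
        - "requiredDuringSchedulingIgnoredDuringExecution"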


@@ -21,8 +21,10 @@ import (
     "testing"
 
     v1 "k8s.io/api/core/v1"
+    policyv1 "k8s.io/api/policy/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -80,9 +82,9 @@ func TestIsEvictable(t *testing.T) {
     nodeLabelKey := "datacenter"
     nodeLabelValue := "east"
     type testCase struct {
-        pod                     *v1.Pod
+        description             string
+        pods                    []*v1.Pod
         nodes                   []*v1.Node
-        runBefore               func(*v1.Pod, []*v1.Node)
         evictFailedBarePods     bool
         evictLocalStoragePods   bool
         evictSystemCriticalPods bool
@@ -92,261 +94,309 @@ func TestIsEvictable(t *testing.T) {
     }
 
     testCases := []testCase{
{ // Failed pod eviction with no ownerRefs. {
pod: test.BuildTestPod("bare_pod_failed", 400, 0, n1.Name, nil), description: "Failed pod eviction with no ownerRefs",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.Status.Phase = v1.PodFailed test.BuildTestPod("bare_pod_failed", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Status.Phase = v1.PodFailed
}),
}, },
evictFailedBarePods: false, evictFailedBarePods: false,
result: false, result: false,
}, { // Normal pod eviction with no ownerRefs and evictFailedBarePods enabled }, {
pod: test.BuildTestPod("bare_pod", 400, 0, n1.Name, nil), description: "Normal pod eviction with no ownerRefs and evictFailedBarePods enabled",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{test.BuildTestPod("bare_pod", 400, 0, n1.Name, nil)},
},
evictFailedBarePods: true, evictFailedBarePods: true,
result: false, result: false,
}, { // Failed pod eviction with no ownerRefs }, {
pod: test.BuildTestPod("bare_pod_failed_but_can_be_evicted", 400, 0, n1.Name, nil), description: "Failed pod eviction with no ownerRefs",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.Status.Phase = v1.PodFailed test.BuildTestPod("bare_pod_failed_but_can_be_evicted", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Status.Phase = v1.PodFailed
}),
}, },
evictFailedBarePods: true, evictFailedBarePods: true,
result: true, result: true,
}, { // Normal pod eviction with normal ownerRefs }, {
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil), description: "Normal pod eviction with normal ownerRefs",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Normal pod eviction with normal ownerRefs and descheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil), description: "Normal pod eviction with normal ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"} test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Normal pod eviction with replicaSet ownerRefs }, {
pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil), description: "Normal pod eviction with replicaSet ownerRefs",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList() test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Normal pod eviction with replicaSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil), description: "Normal pod eviction with replicaSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"} test.BuildTestPod("p4", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList() pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Normal pod eviction with statefulSet ownerRefs }, {
pod: test.BuildTestPod("p18", 400, 0, n1.Name, nil), description: "Normal pod eviction with statefulSet ownerRefs",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList() test.BuildTestPod("p18", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Normal pod eviction with statefulSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p19", 400, 0, n1.Name, nil), description: "Normal pod eviction with statefulSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"} test.BuildTestPod("p19", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList() pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList()
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Pod not evicted because it is bound to a PV and evictLocalStoragePods = false }, {
pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil), description: "Pod not evicted because it is bound to a PV and evictLocalStoragePods = false",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p5", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Spec.Volumes = []v1.Volume{ pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
{ pod.Spec.Volumes = []v1.Volume{
Name: "sample", {
VolumeSource: v1.VolumeSource{ Name: "sample",
HostPath: &v1.HostPathVolumeSource{Path: "somePath"}, VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{ HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)}, EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
}, },
}, }
} }),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: false, result: false,
}, { // Pod is evicted because it is bound to a PV and evictLocalStoragePods = true }, {
pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil), description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = true",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p6", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Spec.Volumes = []v1.Volume{ pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
{ pod.Spec.Volumes = []v1.Volume{
Name: "sample", {
VolumeSource: v1.VolumeSource{ Name: "sample",
HostPath: &v1.HostPathVolumeSource{Path: "somePath"}, VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{ HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)}, EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
}, },
}, }
} }),
}, },
evictLocalStoragePods: true, evictLocalStoragePods: true,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Pod is evicted because it is bound to a PV and evictLocalStoragePods = false, but it has scheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil), description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = false, but it has scheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"} test.BuildTestPod("p7", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.Spec.Volumes = []v1.Volume{ pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
{ pod.Spec.Volumes = []v1.Volume{
Name: "sample", {
VolumeSource: v1.VolumeSource{ Name: "sample",
HostPath: &v1.HostPathVolumeSource{Path: "somePath"}, VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{ HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)}, EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
}, },
}, }
} }),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Pod not evicted becasuse it is part of a daemonSet }, {
pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil), description: "Pod not evicted becasuse it is part of a daemonSet",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList() test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: false, result: false,
}, { // Pod is evicted becasuse it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil), description: "Pod is evicted becasuse it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"} test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList() pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Pod not evicted becasuse it is a mirror pod }, {
pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil), description: "Pod not evicted becasuse it is a mirror poddsa",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p10", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation() pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Annotations = test.GetMirrorPodAnnotation()
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: false, result: false,
}, { // Pod is evicted becasuse it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil), description: "Pod is evicted becasuse it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation() pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true" pod.Annotations = test.GetMirrorPodAnnotation()
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Pod not evicted becasuse it has system critical priority }, {
pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil), description: "Pod not evicted becasuse it has system critical priority",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
priority := utils.SystemCriticalPriority pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Priority = &priority priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: false, result: false,
}, { // Pod is evicted becasuse it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil), description: "Pod is evicted becasuse it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p13", 400, 0, n1.Name, func(pod *v1.Pod) {
priority := utils.SystemCriticalPriority pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Priority = &priority priority := utils.SystemCriticalPriority
pod.Annotations = map[string]string{ pod.Spec.Priority = &priority
"descheduler.alpha.kubernetes.io/evict": "true", pod.Annotations = map[string]string{
} "descheduler.alpha.kubernetes.io/evict": "true",
}
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
result: true, result: true,
}, { // Pod not evicted becasuse it has a priority higher than the configured priority threshold }, {
pod: test.BuildTestPod("p14", 400, 0, n1.Name, nil), description: "Pod not evicted becasuse it has a priority higher than the configured priority threshold",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p14", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Spec.Priority = &highPriority pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Priority = &highPriority
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
priorityThreshold: &lowPriority, priorityThreshold: &lowPriority,
result: false, result: false,
}, { // Pod is evicted becasuse it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p15", 400, 0, n1.Name, nil), description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"} pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Priority = &highPriority pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.Spec.Priority = &highPriority
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
priorityThreshold: &lowPriority, priorityThreshold: &lowPriority,
result: true, result: true,
}, { // Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true }, {
pod: test.BuildTestPod("p16", 400, 0, n1.Name, nil), description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
priority := utils.SystemCriticalPriority pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Priority = &priority priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: true, evictSystemCriticalPods: true,
result: true, result: true,
}, { // Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p16", 400, 0, n1.Name, nil), description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"} pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
priority := utils.SystemCriticalPriority pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.Spec.Priority = &priority priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: true, evictSystemCriticalPods: true,
result: true, result: true,
}, { // Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true }, {
pod: test.BuildTestPod("p17", 400, 0, n1.Name, nil), description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Spec.Priority = &highPriority pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Priority = &highPriority
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: true, evictSystemCriticalPods: true,
priorityThreshold: &lowPriority, priorityThreshold: &lowPriority,
result: true, result: true,
}, { // Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation }, {
pod: test.BuildTestPod("p17", 400, 0, n1.Name, nil), description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { pods: []*v1.Pod{
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"} pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Priority = &highPriority pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
pod.Spec.Priority = &highPriority
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: true, evictSystemCriticalPods: true,
priorityThreshold: &lowPriority, priorityThreshold: &lowPriority,
result: true, result: true,
}, { // Pod with no tolerations running on normal node, all other nodes tainted }, {
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil), description: "Pod with no tolerations running on normal node, all other nodes tainted",
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)}, pods: []*v1.Pod{
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
}),
for _, node := range nodes { },
nodes: []*v1.Node{
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{ node.Spec.Taints = []v1.Taint{
{ {
Key: nodeTaintKey, Key: nodeTaintKey,
@@ -354,27 +404,8 @@ func TestIsEvictable(t *testing.T) {
Effect: v1.TaintEffectNoSchedule, Effect: v1.TaintEffectNoSchedule,
}, },
} }
} }),
}, test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
nodeFit: true,
result: false,
}, { // Pod with correct tolerations running on normal node, all other nodes tainted
pod: test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Spec.Tolerations = []v1.Toleration{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
for _, node := range nodes {
node.Spec.Taints = []v1.Taint{ node.Spec.Taints = []v1.Taint{
{ {
Key: nodeTaintKey, Key: nodeTaintKey,
@@ -382,81 +413,259 @@ func TestIsEvictable(t *testing.T) {
Effect: v1.TaintEffectNoSchedule, Effect: v1.TaintEffectNoSchedule,
}, },
} }
} }),
},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
nodeFit: true,
result: true,
}, { // Pod with incorrect node selector
pod: test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: "fail",
}
}),
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
for _, node := range nodes {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
nodeFit: true, nodeFit: true,
result: false, result: false,
}, { // Pod with correct node selector }, {
pod: test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) { description: "Pod with correct tolerations running on normal node, all other nodes tainted",
pod.Spec.NodeSelector = map[string]string{ pods: []*v1.Pod{
nodeLabelKey: nodeLabelValue, test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
} pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
}), pod.Spec.Tolerations = []v1.Toleration{
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)}, {
runBefore: func(pod *v1.Pod, nodes []*v1.Node) { Key: nodeTaintKey,
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
for _, node := range nodes { },
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
} }
} }),
},
nodes: []*v1.Node{
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
}, },
evictLocalStoragePods: false, evictLocalStoragePods: false,
evictSystemCriticalPods: false, evictSystemCriticalPods: false,
nodeFit: true, nodeFit: true,
result: true, result: true,
}, {
description: "Pod with incorrect node selector",
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: "fail",
}
}),
},
nodes: []*v1.Node{
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
nodeFit: true,
result: false,
}, {
description: "Pod with correct node selector",
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
},
nodes: []*v1.Node{
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
nodeFit: true,
result: true,
}, {
description: "Pod with correct node selector, but only available node doesn't have enough CPU",
pods: []*v1.Pod{
test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
},
nodes: []*v1.Node{
test.BuildTestNode("node2-TEST", 10, 16, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
test.BuildTestNode("node3-TEST", 10, 16, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
nodeFit: true,
result: false,
}, {
description: "Pod with correct node selector, and one node has enough memory",
pods: []*v1.Pod{
test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
test.BuildTestPod("node2-pod-10GB-mem", 20, 10, "node2", func(pod *v1.Pod) {
pod.ObjectMeta.Labels = map[string]string{
"test": "true",
}
}),
test.BuildTestPod("node3-pod-10GB-mem", 20, 10, "node3", func(pod *v1.Pod) {
pod.ObjectMeta.Labels = map[string]string{
"test": "true",
}
}),
},
nodes: []*v1.Node{
test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
test.BuildTestNode("node3", 100, 20, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
nodeFit: true,
result: true,
}, {
description: "Pod with correct node selector, but both nodes don't have enough memory",
pods: []*v1.Pod{
test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
test.BuildTestPod("node2-pod-10GB-mem", 10, 10, "node2", func(pod *v1.Pod) {
pod.ObjectMeta.Labels = map[string]string{
"test": "true",
}
}),
test.BuildTestPod("node3-pod-10GB-mem", 10, 10, "node3", func(pod *v1.Pod) {
pod.ObjectMeta.Labels = map[string]string{
"test": "true",
}
}),
},
nodes: []*v1.Node{
test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
test.BuildTestNode("node3", 100, 16, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
nodeFit: true,
result: false,
},
}
for _, test := range testCases {
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodes := append(test.nodes, n1)
var objs []runtime.Object
for _, node := range test.nodes {
objs = append(objs, node)
}
for _, pod := range test.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := &PodEvictor{
client: fakeClient,
nodes: nodes,
nodeIndexer: getPodsAssignedToNode,
policyGroupVersion: policyv1.SchemeGroupVersion.String(),
dryRun: false,
maxPodsToEvictPerNode: nil,
maxPodsToEvictPerNamespace: nil,
evictLocalStoragePods: test.evictLocalStoragePods,
evictSystemCriticalPods: test.evictSystemCriticalPods,
evictFailedBarePods: test.evictFailedBarePods,
}
var opts []func(opts *Options)
if test.priorityThreshold != nil {
opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
}
if test.nodeFit {
opts = append(opts, WithNodeFit(true))
}
evictable := podEvictor.Evictable(opts...)
result := evictable.IsEvictable(test.pods[0])
if result != test.result {
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
}
})
}
}
func TestPodTypes(t *testing.T) {


@@ -0,0 +1,99 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package descheduler
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2"
"os"
)
// NewLeaderElection starts the leader election code loop
func NewLeaderElection(
run func() error,
client clientset.Interface,
LeaderElectionConfig *componentbaseconfig.LeaderElectionConfiguration,
ctx context.Context,
) error {
var id string
if hostname, err := os.Hostname(); err != nil {
// on errors, make sure we're unique
id = string(uuid.NewUUID())
} else {
// add a uniquifier so that two processes on the same host don't accidentally both become active
id = hostname + "_" + string(uuid.NewUUID())
}
klog.V(3).Infof("Assigned unique lease holder id: %s", id)
if len(LeaderElectionConfig.ResourceNamespace) == 0 {
return fmt.Errorf("namespace may not be empty")
}
if len(LeaderElectionConfig.ResourceName) == 0 {
return fmt.Errorf("name may not be empty")
}
lock, err := resourcelock.New(
LeaderElectionConfig.ResourceLock,
LeaderElectionConfig.ResourceNamespace,
LeaderElectionConfig.ResourceName,
client.CoreV1(),
client.CoordinationV1(),
resourcelock.ResourceLockConfig{
Identity: id,
},
)
if err != nil {
return fmt.Errorf("unable to create leader election lock: %v", err)
}
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: lock,
ReleaseOnCancel: true,
LeaseDuration: LeaderElectionConfig.LeaseDuration.Duration,
RenewDeadline: LeaderElectionConfig.RenewDeadline.Duration,
RetryPeriod: LeaderElectionConfig.RetryPeriod.Duration,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
klog.V(1).InfoS("Started leading")
err := run()
if err != nil {
klog.Error(err)
}
},
OnStoppedLeading: func() {
klog.V(1).InfoS("Leader lost")
},
OnNewLeader: func(identity string) {
// Just got the lock
if identity == id {
return
}
klog.V(1).Infof("New leader elected: %v", identity)
},
},
})
return nil
}
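NewLeaderElection only wires up the resource lock and callbacks; the caller supplies the client, the election settings, and the run function that should execute while the lease is held. Below is a minimal, hypothetical sketch of such a caller, assuming the helper lives in the repository's pkg/descheduler package and that an in-cluster client is available; the namespace, lease name, and durations are illustrative placeholders rather than values taken from this change.
package main
import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	componentbaseconfig "k8s.io/component-base/config"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/pkg/descheduler"
)
func main() {
	// Assumes the process runs inside a cluster; out-of-cluster callers would
	// build the rest.Config from a kubeconfig instead.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	// Illustrative settings only; a real deployment would take these from flags.
	leConfig := &componentbaseconfig.LeaderElectionConfiguration{
		ResourceLock:      "leases",
		ResourceName:      "descheduler",
		ResourceNamespace: "kube-system",
		LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
		RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
		RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
	}
	// The run callback is only invoked on the replica that currently holds the lease.
	run := func() error {
		klog.InfoS("This replica is now the active descheduler")
		return nil
	}
	if err := descheduler.NewLeaderElection(run, client, leConfig, context.Background()); err != nil {
		klog.Fatal(err)
	}
}
Because ReleaseOnCancel is set in the helper, cancelling the passed context releases the lease promptly so another replica can take over.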


@@ -18,13 +18,16 @@ package node
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
@@ -96,32 +99,90 @@ func IsReady(node *v1.Node) bool {
return true
}
// NodeFit returns an empty list when the provided pod can be scheduled onto the provided node,
// and the list of reasons why it cannot otherwise.
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
// deciding if a pod would fit on a node, but more predicates may be added in the future.
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) []error {
// Check node selector and required affinity
var errors []error
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
errors = append(errors, err)
} else if !ok {
errors = append(errors, fmt.Errorf("pod node selector does not match the node label"))
}
// Check taints (we only care about NoSchedule and NoExecute taints)
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
})
if !ok {
errors = append(errors, fmt.Errorf("pod does not tolerate taints on the node"))
}
// Check if the pod can fit on the node based on its resource requests
ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
if !ok {
errors = append(errors, reqErrors...)
}
// Check if node is schedulable
if IsNodeUnschedulable(node) {
errors = append(errors, fmt.Errorf("node is not schedulable"))
}
return errors
}
// PodFitsAnyOtherNode checks if the given pod will fit any of the given nodes, besides the node
// the pod is already running on. The predicates used to determine if the pod will fit can be found in the NodeFit function.
func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
for _, node := range nodes {
// Skip the node the pod is already on
if node.Name == pod.Spec.NodeName {
continue
}
errors := NodeFit(nodeIndexer, pod, node)
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
}
return false
}
// PodFitsAnyNode checks if the given pod will fit any of the given nodes. The predicates used
// to determine if the pod will fit can be found in the NodeFit function.
func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
for _, node := range nodes {
errors := NodeFit(nodeIndexer, pod, node)
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
}
return false
}
// PodFitsCurrentNode checks if the given pod will fit onto the given node. The predicates used
// to determine if the pod will fit can be found in the NodeFit function.
func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) bool {
errors := NodeFit(nodeIndexer, pod, node)
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
return false
@@ -133,39 +194,95 @@ func IsNodeUnschedulable(node *v1.Node) bool {
return node.Spec.Unschedulable
}
// fitsRequest determines if a pod can fit on a node based on its resource requests. It returns true if
// the pod will fit.
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, []error) {
var insufficientResources []error
// Get pod requests
podRequests, _ := utils.PodRequestsAndLimits(pod)
resourceNames := make([]v1.ResourceName, 0, len(podRequests))
for name := range podRequests {
resourceNames = append(resourceNames, name)
}
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames)
if err != nil {
return false, []error{err}
}
podFitsOnNode := true
for _, resource := range resourceNames {
podResourceRequest := podRequests[resource]
availableResource, ok := availableResources[resource]
if !ok || podResourceRequest.MilliValue() > availableResource.MilliValue() {
insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", resource))
podFitsOnNode = false
}
}
return podFitsOnNode, insufficientResources
}
// nodeAvailableResources returns resources mapped to the quantity available on the node.
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName) (map[v1.ResourceName]*resource.Quantity, error) {
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
if err != nil {
return nil, err
}
nodeUtilization := NodeUtilization(podsOnNode, resourceNames)
remainingResources := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
}
for _, name := range resourceNames {
if !IsBasicResource(name) {
if _, exists := node.Status.Allocatable[name]; exists {
allocatableResource := node.Status.Allocatable[name]
remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI)
} else {
remainingResources[name] = resource.NewQuantity(0, resource.DecimalSI)
}
}
}
return remainingResources, nil
}
// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
totalReqs := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
}
for _, name := range resourceNames {
if !IsBasicResource(name) {
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
}
}
for _, pod := range pods {
req, _ := utils.PodRequestsAndLimits(pod)
for _, name := range resourceNames {
quantity, ok := req[name]
if ok && name != v1.ResourcePods {
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
totalReqs[name].Add(quantity)
}
}
}
return totalReqs
}
// IsBasicResource checks if resource is basic native.
func IsBasicResource(name v1.ResourceName) bool {
switch name {
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
return true
default:
return false
}
}
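The request-based check in fitsRequest and nodeAvailableResources comes down to plain Quantity arithmetic: sum the requests of the pods already assigned to the node, subtract that from the node's allocatable, and compare the candidate pod's request against the remainder (a request strictly greater than the remainder counts as insufficient). The following standalone sketch shows that arithmetic with made-up numbers; it deliberately skips the descheduler's indexer plumbing and only uses the resource.Quantity API.
package main
import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)
func main() {
	// Hypothetical node: 4 CPU cores allocatable, with resident pods
	// already requesting 3 cores in total.
	allocatableCPU := resource.NewMilliQuantity(4000, resource.DecimalSI)
	requestedCPU := resource.NewMilliQuantity(3000, resource.DecimalSI)
	// Remaining CPU on the node, mirroring what nodeAvailableResources computes.
	remaining := resource.NewMilliQuantity(allocatableCPU.MilliValue()-requestedCPU.MilliValue(), resource.DecimalSI)
	// A candidate pod asking for 2 cores would not fit; one asking for 500m would.
	for _, podRequest := range []*resource.Quantity{
		resource.NewMilliQuantity(2000, resource.DecimalSI),
		resource.NewMilliQuantity(500, resource.DecimalSI),
	} {
		fits := podRequest.MilliValue() <= remaining.MilliValue()
		fmt.Printf("request=%v remaining=%v fits=%v\n", podRequest, remaining, fits)
	}
}
PodFitsAnyNode and PodFitsAnyOtherNode then simply repeat this per-node verdict across the candidate node list.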


@@ -21,9 +21,12 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
@@ -147,13 +150,13 @@ func TestPodFitsCurrentNode(t *testing.T) {
},
},
},
node: test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
success: true,
},
{
@@ -181,27 +184,48 @@ func TestPodFitsCurrentNode(t *testing.T) {
},
},
},
node: test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "no",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
success: false,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
objs = append(objs, tc.node)
objs = append(objs, tc.pod)
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
actual := PodFitsCurrentNode(getPodsAssignedToNode, tc.pod, tc.node)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
})
}
}
func TestPodFitsAnyOtherNode(t *testing.T) {
nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
nodeTaintKey := "hardware"
@@ -215,238 +239,527 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
pod *v1.Pod
nodes []*v1.Node
success bool
podsOnNodes []*v1.Pod
}{
{
description: "Pod fits another node matching node affinity",
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "no",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{},
success: true,
},
{
description: "Pod expected to fit one of the nodes",
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "no",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{},
success: true,
},
{
description: "Pod expected to fit none of the nodes",
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "unfit1",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "unfit2",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{},
success: false,
},
{
description: "Nodes are unschedulable but labels match, should fail",
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Unschedulable = true
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: "no",
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{},
success: false,
},
{
description: "Both nodes are tainted, should fail",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{},
success: false,
},
{
description: "Two nodes match node selector, one of them is tainted, there is a pod on the available node, and requests are low, should pass",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("test-pod", 12*1000, 20*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(40*1000*1000*1000, resource.DecimalSI)
}),
},
success: true,
},
{
description: "Two nodes match node selector, one of them is tainted, but CPU requests are too big, should fail",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
// Notice that this node only has 4 cores, the pod already on the node below requests 3 cores, and the pod above requests 2 cores
test.BuildTestNode(nodeNames[1], 4000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("3-core-pod", 3000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
},
success: false,
},
{
description: "Two nodes match node selector, one of them is tainted, but memory requests are too big, should fail",
pod: test.BuildTestPod("p1", 2000, 5*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
// Notice that this node only has 8GB of memory, the pod already on the node below requests 4GB, and the pod above requests 5GB
test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("4GB-mem-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
},
success: false,
},
{
description: "Two nodes match node selector, one of them is tainted, but ephemeral storage requests are too big, should fail",
pod: test.BuildTestPod("p1", 2000, 4*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
// Notice that this node only has 20GB of storage, the pod already on the node below requests 11GB, and the pod above requests 10GB
test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(20*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("11GB-storage-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(11*1000*1000*1000, resource.DecimalSI)
}),
},
success: false,
},
{
description: "Two nodes matches node selector, one of them is tained, but custom resource requests are too big, should fail",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
pod.Spec.Containers[0].Resources.Requests["example.com/custom-resource"] = *resource.NewQuantity(10, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
node.Status.Allocatable["example.com/custom-resource"] = *resource.NewQuantity(15, resource.DecimalSI)
}),
// Notice that this node only has 15 of the custom resource, the pod already on the node below requests 10, and the pod above requests 10
test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
node.Status.Allocatable["example.com/custom-resource"] = *resource.NewQuantity(15, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("10-custom-resource-pod", 0, 0, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests["example.com/custom-resource"] = *resource.NewQuantity(10, resource.DecimalSI)
}),
},
success: false,
},
{
description: "Two nodes matches node selector, one of them is tained, CPU requests will fit, and pod Overhead is low enough, should pass",
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
// Notice that this node has 5 CPU cores, the pod below requests 2 cores, and has CPU overhead of 1 cores, and the pod above requests 1 core
test.BuildTestNode(nodeNames[1], 5000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
}),
},
success: true,
},
{
description: "Two nodes matches node selector, one of them is tained, CPU requests will fit, but pod Overhead is too high, should fail",
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
nodeLabelKey: nodeLabelValue,
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
}),
nodes: []*v1.Node{
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
node.Spec.Taints = []v1.Taint{
{
Key: nodeTaintKey,
Value: nodeTaintValue,
Effect: v1.TaintEffectNoSchedule,
},
}
}),
// Notice that this node only has 5 CPU cores, the pod below requests 2 cores, but has CPU overhead of 2 cores, and the pod above requests 2 cores
test.BuildTestNode(nodeNames[1], 5000, 8*1000*1000*1000, 12, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeLabelKey: nodeLabelValue,
}
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
}),
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
},
podsOnNodes: []*v1.Pod{
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
pod.ObjectMeta = metav1.ObjectMeta{
Namespace: "test",
Labels: map[string]string{
"test": "true",
},
}
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
pod.Spec.Overhead = createResourceList(2000, 1000*1000*1000, 1000*1000*1000)
}),
},
success: false,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.podsOnNodes {
objs = append(objs, pod)
}
objs = append(objs, tc.pod)
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
actual := PodFitsAnyOtherNode(getPodsAssignedToNode, tc.pod, tc.nodes)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
})
}
}
// createResourceList builds a small resource list of core resources
func createResourceList(cpu int64, memory int64, ephemeralStorage int64) v1.ResourceList {
resourceList := make(map[v1.ResourceName]resource.Quantity)
resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
resourceList[v1.ResourceEphemeralStorage] = *resource.NewQuantity(ephemeralStorage, resource.DecimalSI)
return resourceList
}


@@ -67,6 +67,7 @@ func TestFindDuplicatePods(t *testing.T) {
Unschedulable: true,
}
})
node6 := test.BuildTestNode("n6", 200, 200, 10, nil)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p1.Namespace = "dev"
@@ -102,6 +103,14 @@ func TestFindDuplicatePods(t *testing.T) {
p18 := test.BuildTestPod("TARGET", 100, 0, node1.Name, nil)
p18.Namespace = "node-fit"
// This pod sits on node6 and is used to take up CPU requests on the node
p19 := test.BuildTestPod("CPU-eater", 150, 150, node6.Name, nil)
p19.Namespace = "test"
// Dummy pod for node6 used to do the opposite of p19
p20 := test.BuildTestPod("CPU-saver", 100, 150, node6.Name, nil)
p20.Namespace = "test"
// ### Evictable Pods ###
// Three Pods in the "default" Namespace, bound to same ReplicaSet. 2 should be evicted.
@@ -263,6 +272,20 @@ func TestFindDuplicatePods(t *testing.T) {
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available does not have enough CPU, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p19},
nodes: []*v1.Node{node1, node6},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available has enough CPU, and nodeFit set to true. 1 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p20},
nodes: []*v1.Node{node1, node6},
expectedEvictedPodCount: 1,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
},
}
for _, testCase := range testCases {
@@ -297,6 +320,7 @@ func TestFindDuplicatePods(t *testing.T) {
nil,
nil,
testCase.nodes,
getPodsAssignedToNode,
false,
false,
false,
@@ -354,7 +378,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
node.Spec.Taints = []v1.Taint{
{
Effect: v1.TaintEffectNoSchedule,
Key: "node-role.kubernetes.io/control-plane",
},
}
}
@@ -363,7 +387,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
if node.ObjectMeta.Labels == nil {
node.ObjectMeta.Labels = map[string]string{}
}
node.ObjectMeta.Labels["node-role.kubernetes.io/control-plane"] = ""
}
setWorkerLabel := func(node *v1.Node) {
@@ -383,7 +407,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: v1.NodeSelectorOpDoesNotExist,
},
{
@@ -407,7 +431,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: v1.NodeSelectorOpDoesNotExist,
},
{
@@ -724,6 +748,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
nil,
nil,
testCase.nodes,
getPodsAssignedToNode,
false,
false,
false,

View File

@@ -166,9 +166,12 @@ func TestRemoveFailedPods(t *testing.T) {
{
description: "nodeFit=true, 1 unschedulable node, 1 container terminated with reason NodeAffinity, 0 eviction",
strategy: createStrategy(true, false, nil, nil, nil, true),
nodes: []*v1.Node{
test.BuildTestNode("node1", 2000, 3000, 10, nil),
test.BuildTestNode("node2", 2000, 2000, 10, func(node *v1.Node) {
node.Spec.Unschedulable = true
}),
},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
@@ -176,6 +179,17 @@ func TestRemoveFailedPods(t *testing.T) {
}), nil),
},
},
{
description: "nodeFit=true, only available node does not have enough resources, 1 container terminated with reason CreateContainerConfigError, 0 eviction",
strategy: createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, true),
nodes: []*v1.Node{test.BuildTestNode("node1", 1, 1, 10, nil), test.BuildTestNode("node2", 0, 0, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
}), nil),
},
},
{ {
description: "excluded owner kind=ReplicaSet, 1 init container terminated with owner kind=ReplicaSet, 0 eviction", description: "excluded owner kind=ReplicaSet, 1 init container terminated with owner kind=ReplicaSet, 0 eviction",
strategy: createStrategy(true, true, nil, []string{"ReplicaSet"}, nil, false), strategy: createStrategy(true, true, nil, []string{"ReplicaSet"}, nil, false),
@@ -261,6 +275,7 @@ func TestRemoveFailedPods(t *testing.T) {
nil, nil,
nil, nil,
tc.nodes, tc.nodes,
getPodsAssignedToNode,
false, false,
false, false,
false, false,

View File

@@ -95,8 +95,8 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
getPodsAssignedToNode,
podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
return evictable.IsEvictable(pod) &&
!nodeutil.PodFitsCurrentNode(getPodsAssignedToNode, pod, node) &&
nodeutil.PodFitsAnyNode(getPodsAssignedToNode, pod, nodes)
}),
)
if err != nil {

View File

@@ -222,6 +222,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
tc.maxPodsToEvictPerNode, tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace, tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes, tc.nodes,
getPodsAssignedToNode,
false, false,
false, false,
false, false,

View File

@@ -55,13 +55,16 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
return
}
var includedNamespaces, excludedNamespaces, excludedTaints sets.String
var labelSelector *metav1.LabelSelector
if strategy.Params != nil {
if strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
}
if strategy.Params.ExcludedTaints != nil {
excludedTaints = sets.NewString(strategy.Params.ExcludedTaints...)
}
labelSelector = strategy.Params.LabelSelector
}
@@ -89,6 +92,18 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
return
}
excludeTaint := func(taint *v1.Taint) bool {
// Exclude taints by key *or* key=value
return excludedTaints.Has(taint.Key) || (taint.Value != "" && excludedTaints.Has(fmt.Sprintf("%s=%s", taint.Key, taint.Value)))
}
taintFilterFnc := func(taint *v1.Taint) bool { return (taint.Effect == v1.TaintEffectNoSchedule) && !excludeTaint(taint) }
if strategy.Params != nil && strategy.Params.IncludePreferNoSchedule {
taintFilterFnc = func(taint *v1.Taint) bool {
return (taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectPreferNoSchedule) && !excludeTaint(taint)
}
}
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
@@ -101,7 +116,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
if !utils.TolerationsTolerateTaintsWithFilter(
pods[i].Spec.Tolerations,
node.Spec.Taints,
taintFilterFnc,
) {
klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
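The excludedTaints entries above are matched either by bare key or by key=value. A minimal sketch of that matching, with invented taint names and assuming this file's existing imports:
// Illustrative only: excludedTaints = {"dedicated", "gpu=true"}.
excluded := sets.NewString("dedicated", "gpu=true")
matches := func(taint *v1.Taint) bool {
return excluded.Has(taint.Key) || (taint.Value != "" && excluded.Has(fmt.Sprintf("%s=%s", taint.Key, taint.Value)))
}
_ = matches(&v1.Taint{Key: "dedicated", Value: "db"}) // true, key match
_ = matches(&v1.Taint{Key: "gpu", Value: "true"})     // true, key=value match
_ = matches(&v1.Taint{Key: "gpu", Value: "false"})    // false, still considered by the strategy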

View File

@@ -27,6 +27,14 @@ func createNoScheduleTaint(key, value string, index int) v1.Taint {
} }
} }
func createPreferNoScheduleTaint(key, value string, index int) v1.Taint {
return v1.Taint{
Key: "testTaint" + fmt.Sprintf("%v", index),
Value: "test" + fmt.Sprintf("%v", index),
Effect: v1.TaintEffectPreferNoSchedule,
}
}
func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
taints := []v1.Taint{}
for _, index := range indices {
@@ -36,12 +44,12 @@ func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
return node
}
func addTolerationToPod(pod *v1.Pod, key, value string, index int, effect v1.TaintEffect) *v1.Pod {
if pod.Annotations == nil {
pod.Annotations = map[string]string{}
}
pod.Spec.Tolerations = []v1.Toleration{{Key: key + fmt.Sprintf("%v", index), Value: value + fmt.Sprintf("%v", index), Effect: effect}}
return pod
}
@@ -63,6 +71,16 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
}
})
node5 := test.BuildTestNode("n5", 2000, 3000, 10, nil)
node5.Spec.Taints = []v1.Taint{
createPreferNoScheduleTaint("testTaint", "test", 1),
}
node6 := test.BuildTestNode("n6", 1, 1, 1, nil)
node6.Spec.Taints = []v1.Taint{
createPreferNoScheduleTaint("testTaint", "test", 1),
}
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
@@ -109,14 +127,20 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
// A Mirror Pod.
p10.Annotations = test.GetMirrorPodAnnotation()
p1 = addTolerationToPod(p1, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
p3 = addTolerationToPod(p3, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1, v1.TaintEffectNoSchedule)
p12.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
p13 := test.BuildTestPod("p13", 100, 0, node5.Name, nil)
p13.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// node5 has PreferNoSchedule:testTaint1=test1, so p13 has to have
// PreferNoSchedule:testTaint0=test0 so that the pod does not tolerate the taint
p13 = addTolerationToPod(p13, "testTaint", "test", 0, v1.TaintEffectPreferNoSchedule)
var uint1 uint = 1
tests := []struct {
@@ -129,6 +153,8 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
maxNoOfPodsToEvictPerNamespace *uint
expectedEvictedPodCount uint
nodeFit bool
includePreferNoSchedule bool
excludedTaints []string
}{
{
@@ -224,6 +250,59 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
expectedEvictedPodCount: 0, // p2 cannot be evicted when nodeFit is true
nodeFit: true,
},
{
description: "Pods not tolerating PreferNoSchedule node taint should not be evicted when not enabled",
pods: []*v1.Pod{p13},
nodes: []*v1.Node{node5},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 0,
},
{
description: "Pods not tolerating PreferNoSchedule node taint should be evicted when enabled",
pods: []*v1.Pod{p13},
nodes: []*v1.Node{node5},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
includePreferNoSchedule: true,
expectedEvictedPodCount: 1, // p13 gets evicted
},
{
description: "Pods not tolerating excluded node taints (by key) should not be evicted",
pods: []*v1.Pod{p2},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
excludedTaints: []string{"excludedTaint1", "testTaint1"},
expectedEvictedPodCount: 0, // nothing gets evicted, as one of the specified excludedTaints matches the key of node1's taint
},
{
description: "Pods not tolerating excluded node taints (by key and value) should not be evicted",
pods: []*v1.Pod{p2},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
excludedTaints: []string{"testTaint1=test1"},
expectedEvictedPodCount: 0, // nothing gets evicted, as both the key and value of the excluded taint match node1's taint
},
{
description: "The excluded taint matches the key of node1's taint, but does not match the value",
pods: []*v1.Pod{p2},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
excludedTaints: []string{"testTaint1=test2"},
expectedEvictedPodCount: 1, // pod gets evicted, as excluded taint value does not match node1's taint value
},
{
description: "Critical and non critical pods, pods not tolerating node taint can't be evicted because the only available node does not have enough resources.",
pods: []*v1.Pod{p2, p7, p9, p10},
nodes: []*v1.Node{node1, node6},
evictLocalStoragePods: false,
evictSystemCriticalPods: true,
expectedEvictedPodCount: 0, //p2 and p7 can't be evicted
nodeFit: true,
},
} }
for _, tc := range tests { for _, tc := range tests {
@@ -259,6 +338,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
tc.maxPodsToEvictPerNode, tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace, tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes, tc.nodes,
getPodsAssignedToNode,
tc.evictLocalStoragePods, tc.evictLocalStoragePods,
tc.evictSystemCriticalPods, tc.evictSystemCriticalPods,
false, false,
@@ -268,7 +348,9 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: tc.nodeFit,
IncludePreferNoSchedule: tc.includePreferNoSchedule,
ExcludedTaints: tc.excludedTaints,
},
}

View File

@@ -63,16 +63,17 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
resourceNames := getResourceNames(targetThresholds)
sourceNodes, highNodes := classifyNodes(
getNodeUsage(nodes, resourceNames, getPodsAssignedToNode),
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode, false),
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
},
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
if nodeutil.IsNodeUnschedulable(node) {
klog.V(2).InfoS("Node is unschedulable", "node", klog.KObj(node))
return false
}
return !isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
})
// log message in one line
@@ -82,7 +83,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
"Pods", thresholds[v1.ResourcePods],
}
for name := range thresholds {
if !nodeutil.IsBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
}
}
@@ -110,7 +111,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
// stop if the total available usage has dropped to zero - no more pods can be scheduled
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
for name := range totalAvailableUsage {
if totalAvailableUsage[name].CmpInt64(0) < 1 {
return false
@@ -119,6 +120,10 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
return true
}
// Sort the nodes by the usage in ascending order
sortNodesByUsage(sourceNodes, true)
evictPodsFromSourceNodes(
ctx,
sourceNodes,
@@ -159,7 +164,7 @@ func setDefaultForThresholds(thresholds, targetThresholds api.ResourceThresholds
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
for name := range thresholds {
if !nodeutil.IsBasicResource(name) {
targetThresholds[name] = MaxResourcePercentage
}
}

View File

@@ -385,6 +385,50 @@ func TestHighNodeUtilization(t *testing.T) {
}, },
expectedPodsEvicted: 0, expectedPodsEvicted: 0,
}, },
{
name: "Other node does not have enough Memory",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 200, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 50, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 50, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 50, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 50, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p5", 400, 100, n2NodeName, func(pod *v1.Pod) {
// A pod requesting more memory than is available on node1
test.SetRSOwnerRef(pod)
}),
},
expectedPodsEvicted: 0,
},
{
name: "Other node does not have enough Memory",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 200, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 50, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 50, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 50, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 50, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p5", 400, 100, n2NodeName, func(pod *v1.Pod) {
// A pod requesting more memory than is available on node1
test.SetRSOwnerRef(pod)
}),
},
expectedPodsEvicted: 0,
},
} }
for _, testCase := range testCases { for _, testCase := range testCases {
@@ -463,6 +507,7 @@ func TestHighNodeUtilization(t *testing.T) {
nil, nil,
nil, nil,
testCase.nodes, testCase.nodes,
getPodsAssignedToNode,
false, false,
false, false,
false, false,
@@ -668,6 +713,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
&item.evictionsExpected, &item.evictionsExpected,
nil, nil,
item.nodes, item.nodes,
getPodsAssignedToNode,
false, false,
false, false,
false, false,

View File

@@ -50,40 +50,57 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}
useDeviationThresholds := strategy.Params.NodeResourceUtilizationThresholds.UseDeviationThresholds
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
if err := validateLowUtilizationStrategyConfig(thresholds, targetThresholds, useDeviationThresholds); err != nil {
klog.ErrorS(err, "LowNodeUtilization config is not valid")
return
}
// check if Pods/CPU/Mem are set, if not, set them to 100
if _, ok := thresholds[v1.ResourcePods]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourcePods] = MinResourcePercentage
targetThresholds[v1.ResourcePods] = MinResourcePercentage
} else {
thresholds[v1.ResourcePods] = MaxResourcePercentage
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
}
}
if _, ok := thresholds[v1.ResourceCPU]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceCPU] = MinResourcePercentage
targetThresholds[v1.ResourceCPU] = MinResourcePercentage
} else {
thresholds[v1.ResourceCPU] = MaxResourcePercentage
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
}
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceMemory] = MinResourcePercentage
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
} else {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
}
}
resourceNames := getResourceNames(thresholds)
lowNodes, sourceNodes := classifyNodes(
getNodeUsage(nodes, resourceNames, getPodsAssignedToNode),
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode, useDeviationThresholds),
// The node has to be schedulable (to be able to move workload there)
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
if nodeutil.IsNodeUnschedulable(node) {
klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
return false
}
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
},
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
return isNodeAboveTargetUtilization(usage, threshold.highResourceThreshold)
},
)
@@ -94,7 +111,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
"Pods", thresholds[v1.ResourcePods], "Pods", thresholds[v1.ResourcePods],
} }
for name := range thresholds { for name := range thresholds {
if !isBasicResource(name) { if !nodeutil.IsBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), int64(thresholds[name])) keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
} }
} }
@@ -108,7 +125,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
"Pods", targetThresholds[v1.ResourcePods], "Pods", targetThresholds[v1.ResourcePods],
} }
for name := range targetThresholds { for name := range targetThresholds {
if !isBasicResource(name) { if !nodeutil.IsBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), int64(targetThresholds[name])) keysAndValues = append(keysAndValues, string(name), int64(targetThresholds[name]))
} }
} }
@@ -138,8 +155,8 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit)) evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved // stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
continueEvictionCond := func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool { continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
if !isNodeAboveTargetUtilization(nodeUsage) { if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
return false return false
} }
for name := range totalAvailableUsage { for name := range totalAvailableUsage {
@@ -151,6 +168,9 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
return true return true
} }
// Sort the nodes by the usage in descending order
sortNodesByUsage(sourceNodes, false)
evictPodsFromSourceNodes( evictPodsFromSourceNodes(
ctx, ctx,
sourceNodes, sourceNodes,
@@ -165,7 +185,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
} }
// validateLowUtilizationStrategyConfig checks if the strategy's config is valid // validateLowUtilizationStrategyConfig checks if the strategy's config is valid
func validateLowUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error { func validateLowUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) error {
// validate thresholds and targetThresholds config // validate thresholds and targetThresholds config
if err := validateThresholds(thresholds); err != nil { if err := validateThresholds(thresholds); err != nil {
return fmt.Errorf("thresholds config is not valid: %v", err) return fmt.Errorf("thresholds config is not valid: %v", err)
@@ -181,7 +201,7 @@ func validateLowUtilizationStrategyConfig(thresholds, targetThresholds api.Resou
for resourceName, value := range thresholds { for resourceName, value := range thresholds {
if targetValue, ok := targetThresholds[resourceName]; !ok { if targetValue, ok := targetThresholds[resourceName]; !ok {
return fmt.Errorf("thresholds and targetThresholds configured different resources") return fmt.Errorf("thresholds and targetThresholds configured different resources")
} else if value > targetValue { } else if value > targetValue && !useDeviationThresholds {
return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName) return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
} }
} }
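For context, a minimal sketch of a strategy configuration that enables the new deviation mode, built from the parameter types used in this change (the 10 percent figures are invented):
// Illustrative only: nodes more than 10% below the cluster-average usage are
// treated as underutilized, nodes more than 10% above it as overutilized.
strategy := api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
UseDeviationThresholds: true,
Thresholds: api.ResourceThresholds{v1.ResourceCPU: 10},
TargetThresholds: api.ResourceThresholds{v1.ResourceCPU: 10},
},
},
}
_ = strategy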

View File

@@ -48,6 +48,7 @@ func TestLowNodeUtilization(t *testing.T) {
testCases := []struct {
name string
useDeviationThresholds bool
thresholds, targetThresholds api.ResourceThresholds
nodes []*v1.Node
pods []*v1.Pod
@@ -643,6 +644,57 @@ func TestLowNodeUtilization(t *testing.T) {
}, },
expectedPodsEvicted: 3, expectedPodsEvicted: 3,
}, },
{
name: "deviation thresholds",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 5,
v1.ResourcePods: 5,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 5,
v1.ResourcePods: 5,
},
useDeviationThresholds: true,
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 2,
evictedPods: []string{},
},
} }
for _, test := range testCases { for _, test := range testCases {
@@ -720,6 +772,7 @@ func TestLowNodeUtilization(t *testing.T) {
nil, nil,
nil, nil,
test.nodes, test.nodes,
getPodsAssignedToNode,
false, false,
false, false,
false, false,
@@ -731,8 +784,9 @@ func TestLowNodeUtilization(t *testing.T) {
Enabled: true,
Params: &api.StrategyParameters{
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
Thresholds: test.thresholds,
TargetThresholds: test.targetThresholds,
UseDeviationThresholds: test.useDeviationThresholds,
},
NodeFit: true,
},
@@ -890,7 +944,7 @@ func TestValidateLowNodeUtilizationStrategyConfig(t *testing.T) {
}
for _, testCase := range tests {
validateErr := validateLowUtilizationStrategyConfig(testCase.thresholds, testCase.targetThresholds, false)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
@@ -1033,6 +1087,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
&item.evictionsExpected, &item.evictionsExpected,
nil, nil,
item.nodes, item.nodes,
getPodsAssignedToNode,
false, false,
false, false,
false, false,

View File

@@ -27,6 +27,8 @@ import (
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/node"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
) )
@@ -36,12 +38,19 @@ type NodeUsage struct {
node *v1.Node node *v1.Node
usage map[v1.ResourceName]*resource.Quantity usage map[v1.ResourceName]*resource.Quantity
allPods []*v1.Pod allPods []*v1.Pod
}
type NodeThresholds struct {
lowResourceThreshold map[v1.ResourceName]*resource.Quantity lowResourceThreshold map[v1.ResourceName]*resource.Quantity
highResourceThreshold map[v1.ResourceName]*resource.Quantity highResourceThreshold map[v1.ResourceName]*resource.Quantity
} }
type continueEvictionCond func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool type NodeInfo struct {
NodeUsage
thresholds NodeThresholds
}
type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool
// NodePodsMap is a set of (node, pods) pairs // NodePodsMap is a set of (node, pods) pairs
type NodePodsMap map[*v1.Node][]*v1.Pod type NodePodsMap map[*v1.Node][]*v1.Pod
@@ -66,7 +75,7 @@ func validateNodeUtilizationParams(params *api.StrategyParameters) error {
// validateThresholds checks if thresholds have valid resource name and resource percentage configured // validateThresholds checks if thresholds have valid resource name and resource percentage configured
func validateThresholds(thresholds api.ResourceThresholds) error { func validateThresholds(thresholds api.ResourceThresholds) error {
if thresholds == nil || len(thresholds) == 0 { if len(thresholds) == 0 {
return fmt.Errorf("no resource threshold is configured") return fmt.Errorf("no resource threshold is configured")
} }
for name, percent := range thresholds { for name, percent := range thresholds {
@@ -77,11 +86,65 @@ func validateThresholds(thresholds api.ResourceThresholds) error {
return nil return nil
} }
func normalizePercentage(percent api.Percentage) api.Percentage {
if percent > MaxResourcePercentage {
return MaxResourcePercentage
}
if percent < MinResourcePercentage {
return MinResourcePercentage
}
return percent
}
func getNodeThresholds(
nodes []*v1.Node,
lowThreshold, highThreshold api.ResourceThresholds,
resourceNames []v1.ResourceName,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
useDeviationThresholds bool,
) map[string]NodeThresholds {
nodeThresholdsMap := map[string]NodeThresholds{}
averageResourceUsagePercent := api.ResourceThresholds{}
if useDeviationThresholds {
averageResourceUsagePercent = averageNodeBasicresources(nodes, getPodsAssignedToNode, resourceNames)
}
for _, node := range nodes {
nodeCapacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
nodeCapacity = node.Status.Allocatable
}
nodeThresholdsMap[node.Name] = NodeThresholds{
lowResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
highResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
}
for _, resourceName := range resourceNames {
if useDeviationThresholds {
cap := nodeCapacity[resourceName]
if lowThreshold[resourceName] == MinResourcePercentage {
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = &cap
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = &cap
} else {
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, normalizePercentage(averageResourceUsagePercent[resourceName]-lowThreshold[resourceName]))
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, normalizePercentage(averageResourceUsagePercent[resourceName]+highThreshold[resourceName]))
}
} else {
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, lowThreshold[resourceName])
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, highThreshold[resourceName])
}
}
}
return nodeThresholdsMap
}
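A small worked example of the deviation branch above, with invented numbers: given a cluster-average CPU usage of 50% and thresholds of 10, a node ends up with a 40% low threshold and a 60% high threshold of its allocatable CPU (800m and 1200m on a 2000m node):
// Illustrative only: average 50%, lowThreshold 10, highThreshold 10.
low := normalizePercentage(50 - 10)  // 40 -> 40% of 2000m = 800m
high := normalizePercentage(50 + 10) // 60 -> 60% of 2000m = 1200m
_, _ = low, high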
func getNodeUsage(
nodes []*v1.Node,
resourceNames []v1.ResourceName,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) []NodeUsage {
var nodeUsageList []NodeUsage
@@ -92,48 +155,37 @@ func getNodeUsage(
continue continue
} }
// A threshold is in percentages but in <0;100> interval.
// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
// Multiplying it with capacity will give fraction of the capacity corresponding to the given high/low resource threshold in Quantity units.
nodeCapacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
nodeCapacity = node.Status.Allocatable
}
lowResourceThreshold := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(lowThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
}
for _, name := range resourceNames {
if !isBasicResource(name) {
cap := nodeCapacity[name]
lowResourceThreshold[name] = resource.NewQuantity(int64(float64(lowThreshold[name])*float64(cap.Value())*0.01), resource.DecimalSI)
}
}
highResourceThreshold := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(highThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(int64(float64(highThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(float64(highThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
}
for _, name := range resourceNames {
if !isBasicResource(name) {
cap := nodeCapacity[name]
highResourceThreshold[name] = resource.NewQuantity(int64(float64(highThreshold[name])*float64(cap.Value())*0.01), resource.DecimalSI)
}
}
nodeUsageList = append(nodeUsageList, NodeUsage{ nodeUsageList = append(nodeUsageList, NodeUsage{
node: node, node: node,
usage: nodeUtilization(node, pods, resourceNames), usage: nodeutil.NodeUtilization(pods, resourceNames),
allPods: pods, allPods: pods,
lowResourceThreshold: lowResourceThreshold,
highResourceThreshold: highResourceThreshold,
}) })
} }
return nodeUsageList return nodeUsageList
} }
func resourceThreshold(nodeCapacity v1.ResourceList, resourceName v1.ResourceName, threshold api.Percentage) *resource.Quantity {
defaultFormat := resource.DecimalSI
if resourceName == v1.ResourceMemory {
defaultFormat = resource.BinarySI
}
resourceCapacityFraction := func(resourceNodeCapacity int64) int64 {
// A threshold is in percentages but in <0;100> interval.
// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
// Multiplying it with capacity will give fraction of the capacity corresponding to the given resource threshold in Quantity units.
return int64(float64(threshold) * 0.01 * float64(resourceNodeCapacity))
}
resourceCapacityQuantity := nodeCapacity.Name(resourceName, defaultFormat)
if resourceName == v1.ResourceCPU {
return resource.NewMilliQuantity(resourceCapacityFraction(resourceCapacityQuantity.MilliValue()), defaultFormat)
}
return resource.NewQuantity(resourceCapacityFraction(resourceCapacityQuantity.Value()), defaultFormat)
}
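As a quick worked example (values invented): a 30 percent CPU threshold on a node with 4000m allocatable CPU resolves to a 1200m quantity:
// Illustrative only: 30% of 4000m CPU -> 1200m.
cpuThreshold := resourceThreshold(v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(4000, resource.DecimalSI),
}, v1.ResourceCPU, 30)
_ = cpuThreshold // 1200m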
func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
nodeCapacity := nodeUsage.node.Status.Capacity
if len(nodeUsage.node.Status.Allocatable) > 0 {
@@ -155,19 +207,24 @@ func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
// low and high thresholds, it is simply ignored.
func classifyNodes(
nodeUsages []NodeUsage,
nodeThresholds map[string]NodeThresholds,
lowThresholdFilter, highThresholdFilter func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool,
) ([]NodeInfo, []NodeInfo) {
lowNodes, highNodes := []NodeInfo{}, []NodeInfo{}
for _, nodeUsage := range nodeUsages {
nodeInfo := NodeInfo{
NodeUsage: nodeUsage,
thresholds: nodeThresholds[nodeUsage.node.Name],
}
if lowThresholdFilter(nodeUsage.node, nodeUsage, nodeThresholds[nodeUsage.node.Name]) {
klog.InfoS("Node is underutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
lowNodes = append(lowNodes, nodeInfo)
} else if highThresholdFilter(nodeUsage.node, nodeUsage, nodeThresholds[nodeUsage.node.Name]) {
klog.InfoS("Node is overutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
highNodes = append(highNodes, nodeInfo)
} else {
klog.InfoS("Node is appropriately utilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
}
}
@@ -179,16 +236,13 @@ func classifyNodes(
// TODO: @ravig Break this function into smaller functions. // TODO: @ravig Break this function into smaller functions.
func evictPodsFromSourceNodes( func evictPodsFromSourceNodes(
ctx context.Context, ctx context.Context,
sourceNodes, destinationNodes []NodeUsage, sourceNodes, destinationNodes []NodeInfo,
podEvictor *evictions.PodEvictor, podEvictor *evictions.PodEvictor,
podFilter func(pod *v1.Pod) bool, podFilter func(pod *v1.Pod) bool,
resourceNames []v1.ResourceName, resourceNames []v1.ResourceName,
strategy string, strategy string,
continueEviction continueEvictionCond, continueEviction continueEvictionCond,
) { ) {
sortNodesByUsage(sourceNodes)
// upper bound on total number of pods/cpu/memory and optional extended resources to be moved // upper bound on total number of pods/cpu/memory and optional extended resources to be moved
totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{ totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{
v1.ResourcePods: {}, v1.ResourcePods: {},
@@ -204,7 +258,7 @@ func evictPodsFromSourceNodes(
if _, ok := totalAvailableUsage[name]; !ok { if _, ok := totalAvailableUsage[name]; !ok {
totalAvailableUsage[name] = resource.NewQuantity(0, resource.DecimalSI) totalAvailableUsage[name] = resource.NewQuantity(0, resource.DecimalSI)
} }
totalAvailableUsage[name].Add(*node.highResourceThreshold[name]) totalAvailableUsage[name].Add(*node.thresholds.highResourceThreshold[name])
totalAvailableUsage[name].Sub(*node.usage[name]) totalAvailableUsage[name].Sub(*node.usage[name])
} }
} }
@@ -216,7 +270,7 @@ func evictPodsFromSourceNodes(
"Pods", totalAvailableUsage[v1.ResourcePods].Value(), "Pods", totalAvailableUsage[v1.ResourcePods].Value(),
} }
for name := range totalAvailableUsage { for name := range totalAvailableUsage {
if !isBasicResource(name) { if !node.IsBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value()) keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
} }
} }
@@ -244,7 +298,7 @@ func evictPodsFromSourceNodes(
func evictPods( func evictPods(
ctx context.Context, ctx context.Context,
inputPods []*v1.Pod, inputPods []*v1.Pod,
nodeUsage NodeUsage, nodeInfo NodeInfo,
totalAvailableUsage map[v1.ResourceName]*resource.Quantity, totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
taintsOfLowNodes map[string][]v1.Taint, taintsOfLowNodes map[string][]v1.Taint,
podEvictor *evictions.PodEvictor, podEvictor *evictions.PodEvictor,
@@ -252,14 +306,14 @@ func evictPods(
continueEviction continueEvictionCond, continueEviction continueEvictionCond,
) { ) {
if continueEviction(nodeUsage, totalAvailableUsage) { if continueEviction(nodeInfo, totalAvailableUsage) {
for _, pod := range inputPods { for _, pod := range inputPods {
if !utils.PodToleratesTaints(pod, taintsOfLowNodes) { if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod)) klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod))
continue continue
} }
success, err := podEvictor.EvictPod(ctx, pod, nodeUsage.node, strategy) success, err := podEvictor.EvictPod(ctx, pod, nodeInfo.node, strategy)
if err != nil { if err != nil {
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod)) klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
break break
@@ -270,30 +324,30 @@ func evictPods(
for name := range totalAvailableUsage { for name := range totalAvailableUsage {
if name == v1.ResourcePods { if name == v1.ResourcePods {
nodeUsage.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI)) nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI)) totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
} else { } else {
quantity := utils.GetResourceRequestQuantity(pod, name) quantity := utils.GetResourceRequestQuantity(pod, name)
nodeUsage.usage[name].Sub(quantity) nodeInfo.usage[name].Sub(quantity)
totalAvailableUsage[name].Sub(quantity) totalAvailableUsage[name].Sub(quantity)
} }
} }
keysAndValues := []interface{}{ keysAndValues := []interface{}{
"node", nodeUsage.node.Name, "node", nodeInfo.node.Name,
"CPU", nodeUsage.usage[v1.ResourceCPU].MilliValue(), "CPU", nodeInfo.usage[v1.ResourceCPU].MilliValue(),
"Mem", nodeUsage.usage[v1.ResourceMemory].Value(), "Mem", nodeInfo.usage[v1.ResourceMemory].Value(),
"Pods", nodeUsage.usage[v1.ResourcePods].Value(), "Pods", nodeInfo.usage[v1.ResourcePods].Value(),
} }
for name := range totalAvailableUsage { for name := range totalAvailableUsage {
if !isBasicResource(name) { if !nodeutil.IsBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value()) keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
} }
} }
klog.V(3).InfoS("Updated node usage", keysAndValues...) klog.V(3).InfoS("Updated node usage", keysAndValues...)
// check if pods can be still evicted // check if pods can be still evicted
if !continueEviction(nodeUsage, totalAvailableUsage) { if !continueEviction(nodeInfo, totalAvailableUsage) {
break break
} }
} }
@@ -301,31 +355,36 @@ func evictPods(
}
}
// sortNodesByUsage sorts nodes based on usage according to the given strategy.
func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
sort.Slice(nodes, func(i, j int) bool {
ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
tj := nodes[j].usage[v1.ResourceMemory].Value() + nodes[j].usage[v1.ResourceCPU].MilliValue() + nodes[j].usage[v1.ResourcePods].Value()
// extended resources
for name := range nodes[i].usage {
if !nodeutil.IsBasicResource(name) {
ti = ti + nodes[i].usage[name].Value()
tj = tj + nodes[j].usage[name].Value()
}
}
// Return ascending order for HighNodeUtilization strategy
if ascending {
return ti < tj
}
// Return descending order for LowNodeUtilization strategy
return ti > tj
})
}
// isNodeAboveTargetUtilization checks if a node is overutilized
// At least one resource has to be above the high threshold
func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
for name, nodeValue := range usage.usage {
// threshold[name] < nodeValue
if threshold[name].Cmp(*nodeValue) == -1 {
return true
}
}
@@ -334,10 +393,10 @@ func isNodeAboveTargetUtilization(usage NodeUsage) bool {
// isNodeWithLowUtilization checks if a node is underutilized
// All resources have to be below the low threshold
func isNodeWithLowUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
for name, nodeValue := range usage.usage {
// threshold[name] < nodeValue
if threshold[name].Cmp(*nodeValue) == -1 {
return false
}
}
@@ -354,43 +413,6 @@ func getResourceNames(thresholds api.ResourceThresholds) []v1.ResourceName {
return resourceNames return resourceNames
} }
// isBasicResource checks if resource is basic native.
func isBasicResource(name v1.ResourceName) bool {
switch name {
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
return true
default:
return false
}
}
func nodeUtilization(node *v1.Node, pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
totalReqs := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
}
for _, name := range resourceNames {
if !isBasicResource(name) {
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
}
}
for _, pod := range pods {
req, _ := utils.PodRequestsAndLimits(pod)
for _, name := range resourceNames {
quantity, ok := req[name]
if ok && name != v1.ResourcePods {
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
totalReqs[name].Add(quantity)
}
}
}
return totalReqs
}
func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*v1.Pod) { func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*v1.Pod) {
var nonRemovablePods, removablePods []*v1.Pod var nonRemovablePods, removablePods []*v1.Pod
@@ -404,3 +426,34 @@ func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*
return nonRemovablePods, removablePods return nonRemovablePods, removablePods
} }
func averageNodeBasicresources(nodes []*v1.Node, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, resourceNames []v1.ResourceName) api.ResourceThresholds {
total := api.ResourceThresholds{}
average := api.ResourceThresholds{}
numberOfNodes := len(nodes)
for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
if err != nil {
numberOfNodes--
continue
}
usage := nodeutil.NodeUtilization(pods, resourceNames)
nodeCapacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
nodeCapacity = node.Status.Allocatable
}
for resource, value := range usage {
nodeCapacityValue := nodeCapacity[resource]
if resource == v1.ResourceCPU {
total[resource] += api.Percentage(value.MilliValue()) / api.Percentage(nodeCapacityValue.MilliValue()) * 100.0
} else {
total[resource] += api.Percentage(value.Value()) / api.Percentage(nodeCapacityValue.Value()) * 100.0
}
}
}
for resource, value := range total {
average[resource] = value / api.Percentage(numberOfNodes)
}
return average
}
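As a quick sanity check on the averaging above (numbers invented): two nodes using 40% and 60% of their CPU yield a 50% cluster-wide baseline, and a node whose pods cannot be listed is dropped from both the numerator and the denominator:
// Illustrative only.
average := (api.Percentage(40) + api.Percentage(60)) / api.Percentage(2) // 50
_ = average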

View File

@@ -18,17 +18,91 @@ package nodeutilization
import (
"fmt"
"math"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
)
var (
lowPriority = int32(0)
highPriority = int32(10000)
extendedResource = v1.ResourceName("example.com/foo")
testNode1 = NodeInfo{
NodeUsage: NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "node1"},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
},
},
}
testNode2 = NodeInfo{
NodeUsage: NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "node2"},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
},
},
}
testNode3 = NodeInfo{
NodeUsage: NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "node3"},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
},
},
}
) )
func TestValidateThresholds(t *testing.T) { func TestValidateThresholds(t *testing.T) {
@@ -156,3 +230,27 @@ func TestResourceUsagePercentages(t *testing.T) {
t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage) t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
} }
func TestSortNodesByUsageDescendingOrder(t *testing.T) {
nodeList := []NodeInfo{testNode1, testNode2, testNode3}
expectedNodeList := []NodeInfo{testNode3, testNode1, testNode2} // testNode3 has the highest usage
sortNodesByUsage(nodeList, false) // ascending=false, sort nodes in descending order
for i := 0; i < len(expectedNodeList); i++ {
if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
}
}
}
func TestSortNodesByUsageAscendingOrder(t *testing.T) {
nodeList := []NodeInfo{testNode1, testNode2, testNode3}
expectedNodeList := []NodeInfo{testNode2, testNode1, testNode3}
sortNodesByUsage(nodeList, true) // ascending=true, sort nodes in ascending order
for i := 0; i < len(expectedNodeList); i++ {
if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
}
}
}
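The two tests above pin down the ordering contract: with the fixtures defined earlier, testNode3 carries the most usage and testNode2 the least, and the boolean flag flips the direction. A rough sketch of a sort satisfying that contract, assuming it lives in the nodeutilization package; totalUsage is a hypothetical helper (not part of the package), and the real sortNodesByUsage may differ:

```go
package nodeutilization

import "sort"

// sortByUsageSketch is illustrative only; totalUsage is a hypothetical
// helper that sums a node's tracked usage quantities.
func sortByUsageSketch(infos []NodeInfo, ascending bool, totalUsage func(NodeInfo) int64) {
	sort.Slice(infos, func(i, j int) bool {
		if ascending {
			return totalUsage(infos[i]) < totalUsage(infos[j]) // least-used nodes first (HighNodeUtilization)
		}
		return totalUsage(infos[i]) > totalUsage(infos[j]) // most-used nodes first (LowNodeUtilization)
	})
}
```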

View File

@@ -47,6 +47,7 @@ func TestPodAntiAffinity(t *testing.T) {
Unschedulable: true, Unschedulable: true,
} }
}) })
node4 := test.BuildTestNode("n4", 2, 2, 1, nil)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil) p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil) p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
@@ -174,6 +175,14 @@ func TestPodAntiAffinity(t *testing.T) {
nodes: []*v1.Node{node1}, nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
}, },
{
description: "Won't evict pods because only other node doesn't have enough resources",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1, node4},
expectedEvictedPodCount: 0,
nodeFit: true,
},
} }
for _, test := range tests { for _, test := range tests {
@@ -209,6 +218,7 @@ func TestPodAntiAffinity(t *testing.T) {
test.maxPodsToEvictPerNode, test.maxPodsToEvictPerNode,
test.maxNoOfPodsToEvictPerNamespace, test.maxNoOfPodsToEvictPerNamespace,
test.nodes, test.nodes,
getPodsAssignedToNode,
false, false,
false, false,
false, false,

View File

@@ -298,6 +298,7 @@ func TestPodLifeTime(t *testing.T) {
nil, nil,
nil, nil,
tc.nodes, tc.nodes,
getPodsAssignedToNode,
false, false,
false, false,
tc.ignorePvcPods, tc.ignorePvcPods,

View File

@@ -97,8 +97,10 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
Unschedulable: true, Unschedulable: true,
} }
}) })
node4 := test.BuildTestNode("node4", 200, 3000, 10, nil)
node5 := test.BuildTestNode("node5", 2000, 3000, 10, nil)
pods := initPods(node1) pods := append(append(initPods(node1), test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, nil)), test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, nil))
createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32, nodeFit bool) api.DeschedulerStrategy { createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32, nodeFit bool) api.DeschedulerStrategy {
return api.DeschedulerStrategy{ return api.DeschedulerStrategy{
@@ -199,6 +201,20 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
expectedEvictedPodCount: 0, expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3, maxPodsToEvictPerNode: &uint3,
}, },
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
strategy: createStrategy(true, true, 1, true),
nodes: []*v1.Node{node1, node4},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
strategy: createStrategy(true, true, 1, true),
nodes: []*v1.Node{node1, node5},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
} }
for _, tc := range tests { for _, tc := range tests {
@@ -234,6 +250,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
tc.maxPodsToEvictPerNode, tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace, tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes, tc.nodes,
getPodsAssignedToNode,
false, false,
false, false,
false, false,

View File

@@ -18,20 +18,18 @@ package strategies
import ( import (
"context" "context"
"fmt"
"math" "math"
"sort" "sort"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation" "sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
@@ -170,7 +168,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint) klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
continue continue
} }
balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodeMap) balanceDomains(client, getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodes)
} }
} }
@@ -225,12 +223,14 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.Topol
// [5, 5, 5, 5, 5, 5] // [5, 5, 5, 5, 5, 5]
// (assuming even distribution by the scheduler of the evicted pods) // (assuming even distribution by the scheduler of the evicted pods)
func balanceDomains( func balanceDomains(
client clientset.Interface,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
podsForEviction map[*v1.Pod]struct{}, podsForEviction map[*v1.Pod]struct{},
constraint v1.TopologySpreadConstraint, constraint v1.TopologySpreadConstraint,
constraintTopologies map[topologyPair][]*v1.Pod, constraintTopologies map[topologyPair][]*v1.Pod,
sumPods float64, sumPods float64,
isEvictable func(*v1.Pod) bool, isEvictable func(pod *v1.Pod) bool,
nodeMap map[string]*v1.Node) { nodes []*v1.Node) {
idealAvg := sumPods / float64(len(constraintTopologies)) idealAvg := sumPods / float64(len(constraintTopologies))
sortedDomains := sortDomains(constraintTopologies, isEvictable) sortedDomains := sortDomains(constraintTopologies, isEvictable)
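To make the averaging step concrete, a toy calculation consistent with the evened-out example in the comment above (the individual pod counts are assumed; only their sum matters):

```go
// An evened-out result of [5, 5, 5, 5, 5, 5] implies 30 pods across 6 domains.
sumPods := 30.0
idealAvg := sumPods / float64(6) // 5 pods per domain is the balancing target
```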
@@ -273,8 +273,19 @@ func balanceDomains(
// also (just for tracking), add them to the list of pods in the lower topology // also (just for tracking), add them to the list of pods in the lower topology
aboveToEvict := sortedDomains[j].pods[len(sortedDomains[j].pods)-movePods:] aboveToEvict := sortedDomains[j].pods[len(sortedDomains[j].pods)-movePods:]
for k := range aboveToEvict { for k := range aboveToEvict {
if err := validatePodFitsOnOtherNodes(aboveToEvict[k], nodeMap); err != nil { // PodFitsAnyOtherNode excludes the current node because, for the sake of domain balancing only, we care about if there is any other
klog.V(2).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(aboveToEvict[k])) // place it could theoretically fit.
// If the pod doesn't fit on its current node, that is a job for RemovePodsViolatingNodeAffinity, and irrelevant to Topology Spreading
// Also, if the pod has a hard nodeAffinity/nodeSelector/toleration that only matches this node,
// don't bother evicting it as it will just end up back on the same node
// however we still account for it "being evicted" so the algorithm can complete
// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
// but still try to get the required # of movePods (instead of just chopping that value off the slice above).
// In other words, PTS can perform suboptimally if some of its chosen pods don't fit on other nodes.
// This is because the chosen pods aren't sorted, but immovable pods still count as "evicted" toward the PTS algorithm.
// So, a better selection heuristic could improve performance.
if !node.PodFitsAnyOtherNode(getPodsAssignedToNode, aboveToEvict[k], nodes) {
klog.V(2).InfoS("ignoring pod for eviction as it does not fit on any other node", "pod", klog.KObj(aboveToEvict[k]))
continue continue
} }
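The comment block above relies on a fit check that ignores the pod's current node. The following is a conceptual sketch only, not the signature or body of the descheduler's node.PodFitsAnyOtherNode; the three predicate parameters are hypothetical stand-ins for the selector/affinity, taint-toleration, and resource-request checks described by the NodeFit feature, and v1 is the usual k8s.io/api/core/v1 import:

```go
func fitsAnyOtherNodeSketch(
	pod *v1.Pod,
	nodes []*v1.Node,
	matchesAffinity func(*v1.Pod, *v1.Node) bool,
	toleratesHardTaints func(*v1.Pod, *v1.Node) bool,
	hasFreeResources func(*v1.Pod, *v1.Node) bool,
) bool {
	for _, n := range nodes {
		if n.Name == pod.Spec.NodeName {
			continue // the current node is irrelevant for domain balancing
		}
		if matchesAffinity(pod, n) && toleratesHardTaints(pod, n) && hasFreeResources(pod, n) {
			return true // at least one other node could host the pod
		}
	}
	return false
}
```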
@@ -285,56 +296,6 @@ func balanceDomains(
} }
} }
// validatePodFitsOnOtherNodes performs validation based on scheduling predicates for affinity and toleration.
// It excludes the current node because, for the sake of domain balancing only, we care about if there is any other
// place it could theoretically fit.
// If the pod doesn't fit on its current node, that is a job for RemovePodsViolatingNodeAffinity, and irrelevant to Topology Spreading
func validatePodFitsOnOtherNodes(pod *v1.Pod, nodeMap map[string]*v1.Node) error {
// if the pod has a hard nodeAffinity/nodeSelector/toleration that only matches this node,
// don't bother evicting it as it will just end up back on the same node
// however we still account for it "being evicted" so the algorithm can complete
// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
// but still try to get the required # of movePods (instead of just chopping that value off the slice above)
isRequiredDuringSchedulingIgnoredDuringExecution := pod.Spec.Affinity != nil &&
pod.Spec.Affinity.NodeAffinity != nil &&
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil
hardTaintsFilter := func(taint *v1.Taint) bool {
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
}
var eligibleNodesCount, ineligibleAffinityNodesCount, ineligibleTaintedNodesCount int
for _, node := range nodeMap {
if node == nodeMap[pod.Spec.NodeName] {
continue
}
if pod.Spec.NodeSelector != nil || isRequiredDuringSchedulingIgnoredDuringExecution {
if !nodeutil.PodFitsCurrentNode(pod, node) {
ineligibleAffinityNodesCount++
continue
}
}
if !utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, hardTaintsFilter) {
ineligibleTaintedNodesCount++
continue
}
eligibleNodesCount++
}
if eligibleNodesCount == 0 {
var errs []error
if ineligibleAffinityNodesCount > 0 {
errs = append(errs, fmt.Errorf("%d nodes with ineligible selector/affinity", ineligibleAffinityNodesCount))
}
if ineligibleTaintedNodesCount > 0 {
errs = append(errs, fmt.Errorf("%d nodes with taints that are not tolerated", ineligibleTaintedNodesCount))
}
return utilerrors.NewAggregate(errs)
}
return nil
}
// sortDomains sorts and splits the list of topology domains based on their size // sortDomains sorts and splits the list of topology domains based on their size
// it also sorts the list of pods within the domains based on their node affinity/selector and priority in the following order: // it also sorts the list of pods within the domains based on their node affinity/selector and priority in the following order:
// 1. non-evictable pods // 1. non-evictable pods
@@ -342,7 +303,7 @@ func validatePodFitsOnOtherNodes(pod *v1.Pod, nodeMap map[string]*v1.Node) error
// 3. pods in descending priority // 3. pods in descending priority
// 4. all other pods // 4. all other pods
// We then pop pods off the back of the list for eviction // We then pop pods off the back of the list for eviction
func sortDomains(constraintTopologyPairs map[topologyPair][]*v1.Pod, isEvictable func(*v1.Pod) bool) []topology { func sortDomains(constraintTopologyPairs map[topologyPair][]*v1.Pod, isEvictable func(pod *v1.Pod) bool) []topology {
sortedTopologies := make([]topology, 0, len(constraintTopologyPairs)) sortedTopologies := make([]topology, 0, len(constraintTopologyPairs))
// sort the topologies and return 2 lists: those <= the average and those > the average (> list inverted) // sort the topologies and return 2 lists: those <= the average and those > the average (> list inverted)
for pair, list := range constraintTopologyPairs { for pair, list := range constraintTopologyPairs {

View File

@@ -483,6 +483,38 @@ func TestTopologySpreadConstraint(t *testing.T) {
}, },
namespaces: []string{"ns1"}, namespaces: []string{"ns1"},
}, },
{
name: "2 domains size [2 6], maxSkew=2, can't move any because node1 does not have enough CPU",
nodes: []*v1.Node{
test.BuildTestNode("n1", 200, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(2),
},
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
},
{
count: 6,
node: "n2",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: true,
},
},
namespaces: []string{"ns1"},
},
{ {
// see https://github.com/kubernetes-sigs/descheduler/issues/564 // see https://github.com/kubernetes-sigs/descheduler/issues/564
name: "Multiple constraints (6 nodes/2 zones, 4 pods)", name: "Multiple constraints (6 nodes/2 zones, 4 pods)",
@@ -686,7 +718,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
namespaces: []string{"ns1"}, namespaces: []string{"ns1"},
}, },
{ {
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node", name: "2 domains, sizes [2,0], maxSkew=1, move 1 pods since pod does not tolerate the tainted node",
nodes: []*v1.Node{ nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }), test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
@@ -718,6 +750,43 @@ func TestTopologySpreadConstraint(t *testing.T) {
strategy: api.DeschedulerStrategy{}, strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"}, namespaces: []string{"ns1"},
}, },
{
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node, and NodeFit is enabled",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
n.Labels["zone"] = "zoneB"
n.Spec.Taints = []v1.Taint{
{
Key: "taint-test",
Value: "test",
Effect: v1.TaintEffectNoSchedule,
},
}
}),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
nodeSelector: map[string]string{"zone": "zoneA"},
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: true,
},
},
namespaces: []string{"ns1"},
},
{ {
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with PreferNoSchedule Taint", name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with PreferNoSchedule Taint",
nodes: []*v1.Node{ nodes: []*v1.Node{
@@ -902,6 +971,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
nil, nil,
nil, nil,
tc.nodes, tc.nodes,
getPodsAssignedToNode,
false, false,
false, false,
false, false,

View File

@@ -6,25 +6,9 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2" "k8s.io/klog/v2"
) )
const (
// owner: @jinxu
// beta: v1.10
//
// New local storage types to support local storage capacity isolation
LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
// owner: @egernst
// alpha: v1.16
//
// Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass
PodOverhead featuregate.Feature = "PodOverhead"
)
// GetResourceRequest finds and returns the request value for a specific resource. // GetResourceRequest finds and returns the request value for a specific resource.
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 { func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
if resource == v1.ResourcePods { if resource == v1.ResourcePods {
@@ -53,11 +37,6 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
requestQuantity = resource.Quantity{Format: resource.DecimalSI} requestQuantity = resource.Quantity{Format: resource.DecimalSI}
} }
if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(LocalStorageCapacityIsolation) {
// if the local storage capacity isolation feature gate is disabled, pods request 0 disk
return requestQuantity
}
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
if rQuantity, ok := container.Resources.Requests[resourceName]; ok { if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
requestQuantity.Add(rQuantity) requestQuantity.Add(rQuantity)
@@ -72,9 +51,9 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
} }
} }
// if PodOverhead feature is supported, add overhead for running a pod // We assume pod overhead feature gate is enabled.
// to the total requests if the resource total is non-zero // We can't import the scheduler settings so we will inherit the default.
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(PodOverhead) { if pod.Spec.Overhead != nil {
if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() { if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
requestQuantity.Add(podOverhead) requestQuantity.Add(podOverhead)
} }
@@ -162,9 +141,9 @@ func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
maxResourceList(limits, container.Resources.Limits) maxResourceList(limits, container.Resources.Limits)
} }
// if PodOverhead feature is supported, add overhead for running a pod // We assume pod overhead feature gate is enabled.
// to the sum of reqeuests and to non-zero limits: // We can't import the scheduler settings so we will inherit the default.
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(PodOverhead) { if pod.Spec.Overhead != nil {
addResourceList(reqs, pod.Spec.Overhead) addResourceList(reqs, pod.Spec.Overhead)
for name, quantity := range pod.Spec.Overhead { for name, quantity := range pod.Spec.Overhead {
@@ -207,12 +186,31 @@ func maxResourceList(list, new v1.ResourceList) {
// PodToleratesTaints returns true if a pod tolerates one node's taints // PodToleratesTaints returns true if a pod tolerates one node's taints
func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool { func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool {
for nodeName, taintsForNode := range taintsOfNodes {
if len(pod.Spec.Tolerations) >= len(taintsForNode) && TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taintsForNode, nil) {
return true
}
klog.V(5).InfoS("Pod doesn't tolerate nodes taint", "pod", klog.KObj(pod), "nodeName", nodeName)
}
for nodeName, taintsForNode := range taintsOfNodes {
if len(pod.Spec.Tolerations) >= len(taintsForNode) {
if TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taintsForNode, nil) {
return true
}
if klog.V(5).Enabled() {
for i := range taintsForNode {
if !TolerationsTolerateTaint(pod.Spec.Tolerations, &taintsForNode[i]) {
klog.V(5).InfoS("Pod doesn't tolerate node taint",
"pod", klog.KObj(pod),
"nodeName", nodeName,
"taint", fmt.Sprintf("%s:%s=%s", taintsForNode[i].Key, taintsForNode[i].Value, taintsForNode[i].Effect),
)
}
}
}
} else {
klog.V(5).InfoS("Pod doesn't tolerate nodes taint, count mismatch",
"pod", klog.KObj(pod),
"nodeName", nodeName,
)
}
}
return false return false
} }
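As a hedged usage sketch of PodToleratesTaints after the change above; the sigs.k8s.io/descheduler/pkg/utils import path is taken from this repository's layout, and the pod and taint values are invented for illustration:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"sigs.k8s.io/descheduler/pkg/utils"
)

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{Tolerations: []v1.Toleration{{
		Key: "dedicated", Operator: v1.TolerationOpEqual, Value: "infra", Effect: v1.TaintEffectNoSchedule,
	}}}}
	taintsOfNodes := map[string][]v1.Taint{
		"node-a": {{Key: "dedicated", Value: "infra", Effect: v1.TaintEffectNoSchedule}},
		"node-b": {{Key: "gpu", Value: "true", Effect: v1.TaintEffectNoSchedule}},
	}
	// true: the pod tolerates every taint on at least one node (node-a);
	// with -v=5 the new code logs exactly which taints were not tolerated.
	fmt.Println(utils.PodToleratesTaints(pod, taintsOfNodes))
}
```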

View File

@@ -20,16 +20,12 @@ import (
"context" "context"
"strings" "strings"
"testing" "testing"
"time"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api" deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils" eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
@@ -109,7 +105,8 @@ func TestRemoveDuplicates(t *testing.T) {
Name: "sample", Name: "sample",
VolumeSource: v1.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{ EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)}, SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
}, },
}, },
} }
@@ -147,6 +144,7 @@ func TestRemoveDuplicates(t *testing.T) {
nil, nil,
nil, nil,
nodes, nodes,
getPodsAssignedToNode,
true, true,
false, false,
false, false,
@@ -177,27 +175,3 @@ func TestRemoveDuplicates(t *testing.T) {
}) })
} }
} }
func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
if err := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelMap).String(),
})
if err != nil {
return false, err
}
if len(podList.Items) != desireRunningPodNum {
t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))
return false, nil
}
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning {
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods running: %v", err)
}
}

View File

@@ -83,7 +83,7 @@ func TestFailedPods(t *testing.T) {
defer jobClient.Delete(ctx, job.Name, metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy}) defer jobClient.Delete(ctx, job.Name, metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy})
waitForJobPodPhase(ctx, t, clientSet, job, v1.PodFailed) waitForJobPodPhase(ctx, t, clientSet, job, v1.PodFailed)
podEvictor := initPodEvictorOrFail(t, clientSet, nodes) podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
t.Logf("Running RemoveFailedPods strategy for %s", name) t.Logf("Running RemoveFailedPods strategy for %s", name)
strategies.RemoveFailedPods( strategies.RemoveFailedPods(

View File

@@ -0,0 +1,201 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"os"
"reflect"
"strings"
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
)
func TestLeaderElection(t *testing.T) {
ctx := context.Background()
clientSet, _, _, stopCh := initializeClient(t)
defer close(stopCh)
ns1 := "e2e-" + strings.ToLower(t.Name()+"-a")
ns2 := "e2e-" + strings.ToLower(t.Name()+"-b")
t.Logf("Creating testing namespace %v", ns1)
testNamespace1 := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns1}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace1, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace1.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace1.Name, metav1.DeleteOptions{})
t.Logf("Creating testing namespace %v", ns2)
testNamespace2 := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns2}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace2, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace2.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace2.Name, metav1.DeleteOptions{})
deployment1, err := createDeployment(ctx, clientSet, ns1, 5, t)
if err != nil {
t.Fatalf("create deployment 1: %v", err)
}
defer clientSet.AppsV1().Deployments(deployment1.Namespace).Delete(ctx, deployment1.Name, metav1.DeleteOptions{})
deployment2, err := createDeployment(ctx, clientSet, ns2, 5, t)
if err != nil {
t.Fatalf("create deployment 2: %v", err)
}
defer clientSet.AppsV1().Deployments(deployment2.Namespace).Delete(ctx, deployment2.Name, metav1.DeleteOptions{})
waitForPodsRunning(ctx, t, clientSet, map[string]string{"test": "leaderelection", "name": "test-leaderelection"}, 5, ns1)
podListAOrg := getPodNameList(ctx, clientSet, ns1, t)
waitForPodsRunning(ctx, t, clientSet, map[string]string{"test": "leaderelection", "name": "test-leaderelection"}, 5, ns2)
podListBOrg := getPodNameList(ctx, clientSet, ns2, t)
s1, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("unable to initialize server: %v", err)
}
s1.Client = clientSet
s1.DeschedulingInterval = 5 * time.Second
s1.LeaderElection.LeaderElect = true
s1.KubeconfigFile = os.Getenv("KUBECONFIG")
s1.PolicyConfigFile = "./policy_leaderelection_a.yaml"
s2, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("unable to initialize server: %v", err)
}
s2.Client = clientSet
s2.DeschedulingInterval = 5 * time.Second
s2.LeaderElection.LeaderElect = true
s2.KubeconfigFile = os.Getenv("KUBECONFIG")
s2.PolicyConfigFile = "./policy_leaderelection_b.yaml"
t.Log("starting deschedulers")
go func() {
err := descheduler.Run(ctx, s1)
if err != nil {
t.Errorf("unable to start descheduler: %v", err)
return
}
}()
time.Sleep(1 * time.Second)
go func() {
err := descheduler.Run(ctx, s2)
if err != nil {
t.Errorf("unable to start descheduler: %v", err)
return
}
}()
defer clientSet.CoordinationV1().Leases(s1.LeaderElection.ResourceNamespace).Delete(ctx, s1.LeaderElection.ResourceName, metav1.DeleteOptions{})
defer clientSet.CoordinationV1().Leases(s2.LeaderElection.ResourceNamespace).Delete(ctx, s2.LeaderElection.ResourceName, metav1.DeleteOptions{})
// wait for a while so all the pods are older than 5 seconds (the policies' maxPodLifeTimeSeconds)
time.Sleep(7 * time.Second)
// validate that pods were evicted from only one of the two namespaces (whichever descheduler instance won the election)
podListA := getPodNameList(ctx, clientSet, ns1, t)
podListB := getPodNameList(ctx, clientSet, ns2, t)
left := reflect.DeepEqual(podListAOrg, podListA)
right := reflect.DeepEqual(podListBOrg, podListB)
singleNamespaceEvicted := (left && !right) || (!left && right)
if singleNamespaceEvicted {
if !left {
t.Logf("Only the pods in %s namespace are evicted. Pods before: %s, Pods after %s", ns1, podListAOrg, podListA)
} else {
t.Logf("Only the pods in %s namespace are evicted. Pods before: %s, Pods after %s", ns2, podListBOrg, podListB)
}
} else {
t.Fatalf("Pods are evicted in both namespaces. For %s namespace Pods before: %s, Pods after %s. And, for %s namespace Pods before: %s, Pods after: %s", ns1, podListAOrg, podListA, ns2, podListBOrg, podListB)
}
}
func createDeployment(ctx context.Context, clientSet clientset.Interface, namespace string, replicas int32, t *testing.T) (*appsv1.Deployment, error) {
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "leaderelection",
Namespace: namespace,
Labels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
},
Spec: appsv1.DeploymentSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Always",
Image: "kubernetes/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
}},
},
},
},
}
t.Logf("Creating deployment %v for namespace %s", deployment.Name, deployment.Namespace)
deployment, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{})
if err != nil {
t.Logf("Error creating deployment: %v", err)
if err = clientSet.AppsV1().Deployments(deployment.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "leaderelection", "name": "test-leaderelection"})).String(),
}); err != nil {
t.Fatalf("Unable to delete deployment: %v", err)
}
return nil, fmt.Errorf("create deployment %v", err)
}
return deployment, nil
}
func getPodNameList(ctx context.Context, clientSet clientset.Interface, namespace string, t *testing.T) []string {
podList, err := clientSet.CoreV1().Pods(namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "leaderelection", "name": "test-leaderelection"})).String()})
if err != nil {
t.Fatalf("Unable to list pods from ns: %s: %v", namespace, err)
}
podNames := make([]string, len(podList.Items))
for i, pod := range podList.Items {
podNames[i] = pod.Name
}
return podNames
}

View File

@@ -39,7 +39,6 @@ import (
v1qos "k8s.io/kubectl/pkg/util/qos" v1qos "k8s.io/kubectl/pkg/util/qos"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api" deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler" "sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/descheduler/client" "sigs.k8s.io/descheduler/pkg/descheduler/client"
@@ -199,6 +198,7 @@ func runPodLifetimeStrategy(
nil, nil,
nil, nil,
nodes, nodes,
getPodsAssignedToNode,
false, false,
evictCritical, evictCritical,
false, false,
@@ -324,7 +324,7 @@ func TestLowNodeUtilization(t *testing.T) {
waitForRCPodsRunning(ctx, t, clientSet, rc) waitForRCPodsRunning(ctx, t, clientSet, rc)
// Run LowNodeUtilization strategy // Run LowNodeUtilization strategy
podEvictor := initPodEvictorOrFail(t, clientSet, nodes) podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc() podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil { if err != nil {
@@ -517,8 +517,8 @@ func TestEvictSystemCriticalPriorityClass(t *testing.T) {
} }
func testEvictSystemCritical(t *testing.T, isPriorityClass bool) { func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
var highPriority = int32(1000) highPriority := int32(1000)
var lowPriority = int32(500) lowPriority := int32(500)
ctx := context.Background() ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t) clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
@@ -647,8 +647,8 @@ func TestThresholdPriorityClass(t *testing.T) {
} }
func testPriority(t *testing.T, isPriorityClass bool) { func testPriority(t *testing.T, isPriorityClass bool) {
var highPriority = int32(1000) highPriority := int32(1000)
var lowPriority = int32(500) lowPriority := int32(500)
ctx := context.Background() ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t) clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
@@ -745,7 +745,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
t.Fatalf("None of %v high priority pods are expected to be deleted", expectReservePodNames) t.Fatalf("None of %v high priority pods are expected to be deleted", expectReservePodNames)
} }
//check if all pods with low priority class are evicted // check if all pods with low priority class are evicted
if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) { if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
podListLowPriority, err := clientSet.CoreV1().Pods(rcLowPriority.Namespace).List( podListLowPriority, err := clientSet.CoreV1().Pods(rcLowPriority.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcLowPriority.Spec.Template.Labels).String()}) ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcLowPriority.Spec.Template.Labels).String()})
@@ -848,7 +848,7 @@ func TestPodLabelSelector(t *testing.T) {
t.Fatalf("None of %v unevictable pods are expected to be deleted", expectReservePodNames) t.Fatalf("None of %v unevictable pods are expected to be deleted", expectReservePodNames)
} }
//check if all selected pods are evicted // check if all selected pods are evicted
if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) { if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
podListEvict, err := clientSet.CoreV1().Pods(rcEvict.Namespace).List( podListEvict, err := clientSet.CoreV1().Pods(rcEvict.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcEvict.Spec.Template.Labels).String()}) ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcEvict.Spec.Template.Labels).String()})
@@ -886,17 +886,6 @@ func TestEvictAnnotation(t *testing.T) {
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t) clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh) defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
var nodes []*v1.Node
for i := range nodeList.Items {
node := nodeList.Items[i]
nodes = append(nodes, &node)
}
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}} testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil { if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name) t.Fatalf("Unable to create ns %v", testNamespace.Name)
@@ -911,7 +900,8 @@ func TestEvictAnnotation(t *testing.T) {
Name: "sample", Name: "sample",
VolumeSource: v1.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{ EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)}, SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
}, },
}, },
} }
@@ -976,7 +966,7 @@ func TestDeschedulingInterval(t *testing.T) {
} }
s.Client = clientSet s.Client = clientSet
deschedulerPolicy := &api.DeschedulerPolicy{} deschedulerPolicy := &deschedulerapi.DeschedulerPolicy{}
c := make(chan bool, 1) c := make(chan bool, 1)
go func() { go func() {
@@ -984,9 +974,7 @@ func TestDeschedulingInterval(t *testing.T) {
if err != nil || len(evictionPolicyGroupVersion) == 0 { if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Errorf("Error when checking support for eviction: %v", err) t.Errorf("Error when checking support for eviction: %v", err)
} }
if err := descheduler.RunDeschedulerStrategies(ctx, s, deschedulerPolicy, evictionPolicyGroupVersion); err != nil {
stopChannel := make(chan struct{})
if err := descheduler.RunDeschedulerStrategies(ctx, s, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel); err != nil {
t.Errorf("Error running descheduler strategies: %+v", err) t.Errorf("Error running descheduler strategies: %+v", err)
} }
c <- true c <- true
@@ -1145,8 +1133,8 @@ func createBalancedPodForNodes(
// find the max, if the node has the max,use the one, if not,use the ratio parameter // find the max, if the node has the max,use the one, if not,use the ratio parameter
var maxCPUFraction, maxMemFraction float64 = ratio, ratio var maxCPUFraction, maxMemFraction float64 = ratio, ratio
var cpuFractionMap = make(map[string]float64) cpuFractionMap := make(map[string]float64)
var memFractionMap = make(map[string]float64) memFractionMap := make(map[string]float64)
for _, node := range nodes { for _, node := range nodes {
cpuFraction, memFraction, _, _ := computeCPUMemFraction(t, cs, node, podRequestedResource) cpuFraction, memFraction, _, _ := computeCPUMemFraction(t, cs, node, podRequestedResource)
@@ -1183,7 +1171,7 @@ func createBalancedPodForNodes(
// add crioMinMemLimit to ensure that all pods are setting at least that much for a limit, while keeping the same ratios // add crioMinMemLimit to ensure that all pods are setting at least that much for a limit, while keeping the same ratios
needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)+float64(crioMinMemLimit)), resource.BinarySI) needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)+float64(crioMinMemLimit)), resource.BinarySI)
var gracePeriod = int64(1) gracePeriod := int64(1)
// Don't set OwnerReferences to avoid pod eviction // Don't set OwnerReferences to avoid pod eviction
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -1313,20 +1301,44 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
} }
} }
func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
if err := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelMap).String(),
})
if err != nil {
return false, err
}
if len(podList.Items) != desireRunningPodNum {
t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))
return false, nil
}
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning {
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods running: %v", err)
}
}
func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) { func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
var allNodes []*v1.Node var allNodes []*v1.Node
var workerNodes []*v1.Node var workerNodes []*v1.Node
for i := range nodes { for i := range nodes {
node := nodes[i] node := nodes[i]
allNodes = append(allNodes, &node) allNodes = append(allNodes, &node)
if _, exists := node.Labels["node-role.kubernetes.io/master"]; !exists { if _, exists := node.Labels["node-role.kubernetes.io/control-plane"]; !exists {
workerNodes = append(workerNodes, &node) workerNodes = append(workerNodes, &node)
} }
} }
return allNodes, workerNodes return allNodes, workerNodes
} }
func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*v1.Node) *evictions.PodEvictor { func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, nodes []*v1.Node) *evictions.PodEvictor {
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet) evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 { if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("Error creating eviction policy group: %v", err) t.Fatalf("Error creating eviction policy group: %v", err)
@@ -1338,6 +1350,7 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*
nil, nil,
nil, nil,
nodes, nodes,
getPodsAssignedToNode,
true, true,
false, false,
false, false,

View File

@@ -137,6 +137,7 @@ func TestTooManyRestarts(t *testing.T) {
nil, nil,
nil, nil,
nodes, nodes,
getPodsAssignedToNode,
true, true,
false, false,
false, false,
@@ -183,14 +184,25 @@ func waitPodRestartCount(ctx context.Context, clientSet clientset.Interface, nam
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"})).String(), LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"})).String(),
}) })
if podList.Items[0].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[1].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[2].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[3].Status.ContainerStatuses[0].RestartCount >= 4 {
t.Log("Pod restartCount as expected")
return true, nil
}
if err != nil { if err != nil {
t.Fatalf("Unexpected err: %v", err) t.Fatalf("Unexpected err: %v", err)
return false, err return false, err
} }
if len(podList.Items) < 4 {
t.Log("Waiting for 4 pods")
return false, nil
}
for i := 0; i < 4; i++ {
if len(podList.Items[i].Status.ContainerStatuses) < 1 {
t.Logf("Waiting for podList.Items[%v].Status.ContainerStatuses to be populated", i)
return false, nil
}
}
if podList.Items[0].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[1].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[2].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[3].Status.ContainerStatuses[0].RestartCount >= 4 {
t.Log("Pod restartCount as expected")
return true, nil
}
} }
} }
} }

View File

@@ -77,7 +77,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
defer deleteRC(ctx, t, clientSet, violatorRc) defer deleteRC(ctx, t, clientSet, violatorRc)
waitForRCPodsRunning(ctx, t, clientSet, violatorRc) waitForRCPodsRunning(ctx, t, clientSet, violatorRc)
podEvictor := initPodEvictorOrFail(t, clientSet, nodes) podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
// Run TopologySpreadConstraint strategy // Run TopologySpreadConstraint strategy
t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name) t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)

View File

@@ -0,0 +1,11 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 5
namespaces:
include:
- "e2e-testleaderelection-a"

View File

@@ -0,0 +1,11 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 5
namespaces:
include:
- "e2e-testleaderelection-b"

View File

@@ -20,9 +20,9 @@ set -o nounset
# This just runs e2e tests. # This just runs e2e tests.
if [ -n "$KIND_E2E" ]; then if [ -n "$KIND_E2E" ]; then
K8S_VERSION=${KUBERNETES_VERSION:-v1.21.1} K8S_VERSION=${KUBERNETES_VERSION:-v1.24.0}
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/ curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.11.0/kind-linux-amd64 wget https://github.com/kubernetes-sigs/kind/releases/download/v0.13.0/kind-linux-amd64
chmod +x kind-linux-amd64 chmod +x kind-linux-amd64
mv kind-linux-amd64 kind mv kind-linux-amd64 kind
export PATH=$PATH:$PWD export PATH=$PATH:$PWD

View File

@@ -1,21 +0,0 @@
language: go
matrix:
include:
- go: 1.4.3
- go: 1.5.4
- go: 1.6.3
- go: 1.7
- go: tip
allow_failures:
- go: tip
install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
-repotoken $COVERALLS_TOKEN
- echo "Build examples" ; cd examples && go build
- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
env:
global:
secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=

View File

@@ -1,194 +0,0 @@
semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
======
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
Usage
-----
```bash
$ go get github.com/blang/semver
```
Note: Always vendor your dependencies or fix on a specific version tag.
```go
import github.com/blang/semver
v1, err := semver.Make("1.0.0-beta")
v2, err := semver.Make("2.0.0-beta")
v1.Compare(v2)
```
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
Why should I use this lib?
-----
- Fully spec compatible
- No reflection
- No regex
- Fully tested (Coverage >99%)
- Readable parsing/validation errors
- Fast (See [Benchmarks](#benchmarks))
- Only Stdlib
- Uses values instead of pointers
- Many features, see below
Features
-----
- Parsing and validation at all levels
- Comparator-like comparisons
- Compare Helper Methods
- InPlace manipulation
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
- Wildcards `>=1.x`, `<=2.5.x`
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)
Ranges
------
A `Range` is a set of conditions which specify which versions satisfy the range.
A condition is composed of an operator and a version. The supported operators are:
- `<1.0.0` Less than `1.0.0`
- `<=1.0.0` Less than or equal to `1.0.0`
- `>1.0.0` Greater than `1.0.0`
- `>=1.0.0` Greater than or equal to `1.0.0`
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
Note that spaces between the operator and the version will be gracefully tolerated.
A `Range` can link multiple `Ranges` separated by space:
Ranges can be linked by logical AND:
- `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
- `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
Ranges can also be linked by logical OR:
- `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
AND has a higher precedence than OR. It's not possible to use brackets.
Ranges can be combined by both AND and OR
- `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
Range usage:
```
v, err := semver.Parse("1.2.3")
range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
if range(v) {
//valid
}
```
Example
-----
Have a look at full examples in [examples/main.go](examples/main.go)
```go
import github.com/blang/semver
v, err := semver.Make("0.0.1-alpha.preview+123.github")
fmt.Printf("Major: %d\n", v.Major)
fmt.Printf("Minor: %d\n", v.Minor)
fmt.Printf("Patch: %d\n", v.Patch)
fmt.Printf("Pre: %s\n", v.Pre)
fmt.Printf("Build: %s\n", v.Build)
// Prerelease versions array
if len(v.Pre) > 0 {
fmt.Println("Prerelease versions:")
for i, pre := range v.Pre {
fmt.Printf("%d: %q\n", i, pre)
}
}
// Build meta data array
if len(v.Build) > 0 {
fmt.Println("Build meta data:")
for i, build := range v.Build {
fmt.Printf("%d: %q\n", i, build)
}
}
v001, err := semver.Make("0.0.1")
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
v001.GT(v) == true
v.LT(v001) == true
v.GTE(v) == true
v.LTE(v) == true
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
v001.Compare(v) == 1
v.Compare(v001) == -1
v.Compare(v) == 0
// Manipulate Version in place:
v.Pre[0], err = semver.NewPRVersion("beta")
if err != nil {
fmt.Printf("Error parsing pre release version: %q", err)
}
fmt.Println("\nValidate versions:")
v.Build[0] = "?"
err = v.Validate()
if err != nil {
fmt.Printf("Validation failed: %s\n", err)
}
```
Benchmarks
-----
BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
See benchmark cases at [semver_test.go](semver_test.go)
Motivation
-----
I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like.
Contribution
-----
Feel free to make a pull request. For bigger changes create a issue first to discuss about it.
License
-----
See [LICENSE](LICENSE) file.

View File

@@ -1,17 +0,0 @@
{
"author": "blang",
"bugs": {
"URL": "https://github.com/blang/semver/issues",
"url": "https://github.com/blang/semver/issues"
},
"gx": {
"dvcsimport": "github.com/blang/semver"
},
"gxVersion": "0.10.0",
"language": "go",
"license": "MIT",
"name": "semver",
"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
"version": "3.5.1"
}

View File

@@ -327,7 +327,7 @@ func expandWildcardVersion(parts [][]string) ([][]string, error) {
for _, p := range parts { for _, p := range parts {
var newParts []string var newParts []string
for _, ap := range p { for _, ap := range p {
if strings.Index(ap, "x") != -1 { if strings.Contains(ap, "x") {
opStr, vStr, err := splitComparatorVersion(ap) opStr, vStr, err := splitComparatorVersion(ap)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@@ -26,7 +26,7 @@ type Version struct {
Minor uint64 Minor uint64
Patch uint64 Patch uint64
Pre []PRVersion Pre []PRVersion
Build []string //No Precendence Build []string //No Precedence
} }
// Version to string // Version to string
@@ -61,6 +61,18 @@ func (v Version) String() string {
return string(b) return string(b)
} }
// FinalizeVersion discards prerelease and build number and only returns
// major, minor and patch number.
func (v Version) FinalizeVersion() string {
b := make([]byte, 0, 5)
b = strconv.AppendUint(b, v.Major, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Minor, 10)
b = append(b, '.')
b = strconv.AppendUint(b, v.Patch, 10)
return string(b)
}
// Equals checks if v is equal to o. // Equals checks if v is equal to o.
func (v Version) Equals(o Version) bool { func (v Version) Equals(o Version) bool {
return (v.Compare(o) == 0) return (v.Compare(o) == 0)
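A hedged usage sketch of the FinalizeVersion helper added above; the github.com/blang/semver/v4 module path is an assumption based on the API surface this vendored copy exposes:

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	v := semver.MustParse("1.2.3-beta.1+build.42")
	// Prerelease and build metadata are discarded; only major.minor.patch remains.
	fmt.Println(v.FinalizeVersion()) // 1.2.3
}
```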
@@ -161,6 +173,27 @@ func (v Version) Compare(o Version) int {
} }
// IncrementPatch increments the patch version
func (v *Version) IncrementPatch() error {
v.Patch++
return nil
}
// IncrementMinor increments the minor version
func (v *Version) IncrementMinor() error {
v.Minor++
v.Patch = 0
return nil
}
// IncrementMajor increments the major version
func (v *Version) IncrementMajor() error {
v.Major++
v.Minor = 0
v.Patch = 0
return nil
}
// Validate validates v and returns error in case // Validate validates v and returns error in case
func (v Version) Validate() error { func (v Version) Validate() error {
// Major, Minor, Patch already validated using uint64 // Major, Minor, Patch already validated using uint64
@@ -189,10 +222,10 @@ func (v Version) Validate() error {
} }
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error // New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
func New(s string) (vp *Version, err error) { func New(s string) (*Version, error) {
v, err := Parse(s) v, err := Parse(s)
vp = &v vp := &v
return return vp, err
} }
// Make is an alias for Parse, parses version string and returns a validated Version or error // Make is an alias for Parse, parses version string and returns a validated Version or error
@@ -202,14 +235,25 @@ func Make(s string) (Version, error) {
// ParseTolerant allows for certain version specifications that do not strictly adhere to semver // ParseTolerant allows for certain version specifications that do not strictly adhere to semver
// specs to be parsed by this library. It does so by normalizing versions before passing them to // specs to be parsed by this library. It does so by normalizing versions before passing them to
// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions // Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions
// with only major and minor components specified // with only major and minor components specified, and removes leading 0s.
func ParseTolerant(s string) (Version, error) { func ParseTolerant(s string) (Version, error) {
s = strings.TrimSpace(s) s = strings.TrimSpace(s)
s = strings.TrimPrefix(s, "v") s = strings.TrimPrefix(s, "v")
// Split into major.minor.(patch+pr+meta) // Split into major.minor.(patch+pr+meta)
parts := strings.SplitN(s, ".", 3) parts := strings.SplitN(s, ".", 3)
// Remove leading zeros.
for i, p := range parts {
if len(p) > 1 {
p = strings.TrimLeft(p, "0")
if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") {
p = "0" + p
}
parts[i] = p
}
}
// Fill up shortened versions.
if len(parts) < 3 {
if strings.ContainsAny(parts[len(parts)-1], "+-") {
return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
@@ -217,8 +261,8 @@ func ParseTolerant(s string) (Version, error) {
for len(parts) < 3 {
parts = append(parts, "0")
}
-s = strings.Join(parts, ".")
}
+s = strings.Join(parts, ".")
return Parse(s)
}
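
The net effect of this hunk is that leading zeros are stripped before strict parsing, while the existing normalizations keep working. A sketch of the resulting behavior, again assuming the github.com/blang/semver/v4 import path:

```go
package main

import (
	"fmt"

	semver "github.com/blang/semver/v4" // assumed import path for the vendored semver package
)

func main() {
	// "v" prefix stripped, missing patch filled with 0.
	v1, _ := semver.ParseTolerant("v1.24")
	fmt.Println(v1) // 1.24.0

	// Leading zeros are now trimmed before the strict Parse call.
	v2, _ := semver.ParseTolerant("1.04.01")
	fmt.Println(v2) // 1.4.1

	// Shortened versions still cannot carry prerelease/build metadata.
	_, err := semver.ParseTolerant("1.2-rc")
	fmt.Println(err) // Short version cannot contain PreRelease/Build meta data
}
```
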
@@ -416,3 +460,17 @@ func NewBuildVersion(s string) (string, error) {
}
return s, nil
}
// FinalizeVersion returns the major, minor and patch number only and discards
// prerelease and build number.
func FinalizeVersion(s string) (string, error) {
v, err := Parse(s)
if err != nil {
return "", err
}
v.Pre = nil
v.Build = nil
finalVer := v.String()
return finalVer, nil
}


@@ -14,7 +14,7 @@ func (v *Version) Scan(src interface{}) (err error) {
case []byte:
str = string(src)
default:
-return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
+return fmt.Errorf("version.Scan: cannot convert %T to string", src)
}
if t, err := Parse(str); err == nil {
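
This Scan method is what lets a Version be read straight out of a database column via database/sql; presumably the parsed value is assigned to the receiver when Parse succeeds. A sketch calling it directly, no real database required, with the same assumed import path:

```go
package main

import (
	"fmt"

	semver "github.com/blang/semver/v4" // assumed import path for the vendored semver package
)

func main() {
	var v semver.Version

	// sql.Rows.Scan hands the raw driver value to Version.Scan; a []byte column parses directly.
	if err := v.Scan([]byte("1.24.0")); err != nil {
		panic(err)
	}
	fmt.Println(v) // 1.24.0

	// Unsupported driver types fall through to the (now lowercase, period-free) error.
	err := v.Scan(42)
	fmt.Println(err) // version.Scan: cannot convert int to string
}
```
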


@@ -1,8 +0,0 @@
language: go
go:
- "1.x"
- master
env:
- TAGS=""
- TAGS="-tags purego"
script: go test $TAGS -v ./...


@@ -1,7 +1,7 @@
# xxhash
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
@@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
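
For readers who only know this library as a vendored dependency, its core API is a one-shot function over a byte slice plus a streaming Digest; a minimal sketch using the v2 module path:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	h := xxhash.Sum64([]byte("Hello, xxhash"))
	fmt.Printf("%016x\n", h)

	// Streaming: feed data incrementally and read the same 64-bit digest.
	d := xxhash.New()
	d.Write([]byte("Hello, "))
	d.Write([]byte("xxhash"))
	fmt.Println(d.Sum64() == h) // true
}
```
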


@@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
-b = b[len(d.mem):]
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
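
The hunk above touches Digest's binary deserialization, which is what allows a partially fed hash state to be checkpointed and resumed. A sketch of that round trip, assuming the v2 module path and the matching MarshalBinary method:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// Hash the first half of the input and checkpoint the state.
	d1 := xxhash.New()
	d1.Write([]byte("hello "))
	state, err := d1.MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore the state into a fresh Digest and finish the input.
	d2 := xxhash.New()
	if err := d2.UnmarshalBinary(state); err != nil {
		panic(err)
	}
	d2.Write([]byte("world"))

	fmt.Println(d2.Sum64() == xxhash.Sum64([]byte("hello world"))) // true
}
```
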


@@ -6,7 +6,7 @@
// Register allocation:
// AX h
-// CX pointer to advance through b
+// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
@@ -16,39 +16,39 @@
// R12 tmp
// R13 prime1v
// R14 prime2v
-// R15 prime4v
+// DI prime4v
-// round reads from and advances the buffer pointer in CX.
+// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
-MOVQ (CX), R12 \
+MOVQ (SI), R12 \
-ADDQ $8, CX \
+ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
-ADDQ R15, acc
+ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
-MOVQ ·prime4v(SB), R15
+MOVQ ·prime4v(SB), DI
// Load slice.
-MOVQ b_base+0(FP), CX
+MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
-LEAQ (CX)(DX*1), BX
+LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
XORQ R11, R11
SUBQ R13, R11
-// Loop until CX > BX.
+// Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
-CMPQ CX, BX
+CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
@@ -100,16 +100,16 @@ noBlocks:
afterBlocks:
ADDQ DX, AX
-// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
-CMPQ CX, BX
+CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
-MOVQ (CX), R8
+MOVQ (SI), R8
-ADDQ $8, CX
+ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
@@ -117,18 +117,18 @@ wordLoop:
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
-ADDQ R15, AX
+ADDQ DI, AX
-CMPQ CX, BX
+CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
-CMPQ CX, BX
+CMPQ SI, BX
JG singles
-MOVL (CX), R8
+MOVL (SI), R8
-ADDQ $4, CX
+ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
@@ -138,19 +138,19 @@ fourByte:
singles:
ADDQ $4, BX
-CMPQ CX, BX
+CMPQ SI, BX
JGE finalize
singlesLoop:
-MOVBQZX (CX), R12
+MOVBQZX (SI), R12
-ADDQ $1, CX
+ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
-CMPQ CX, BX
+CMPQ SI, BX
JL singlesLoop
finalize:
@@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
MOVQ ·prime2v(SB), R14
// Load slice.
-MOVQ b_base+8(FP), CX
+MOVQ b_base+8(FP), SI
MOVQ b_len+16(FP), DX
-LEAQ (CX)(DX*1), BX
+LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
@@ -199,7 +199,7 @@ blockLoop:
round(R10)
round(R11)
-CMPQ CX, BX
+CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
@@ -208,8 +208,8 @@ blockLoop:
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
-// The number of bytes written is CX minus the old base pointer.
+// The number of bytes written is SI minus the old base pointer.
-SUBQ b_base+8(FP), CX
+SUBQ b_base+8(FP), SI
-MOVQ CX, ret+32(FP)
+MOVQ SI, ret+32(FP)
RET


@@ -6,41 +6,52 @@
package xxhash
import (
-"reflect"
"unsafe"
)
-// Notes:
-//
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
-//
// In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
+// var b []byte
+// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+// bh.Len = len(s)
+// bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
-var b []byte
-bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-bh.Len = len(s)
-bh.Cap = len(s)
+b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
-var b []byte
-bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-bh.Len = len(s)
-bh.Cap = len(s)
-return d.Write(b)
+d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+// d.Write always returns len(s), nil.
+// Ignoring the return output and returning these fixed values buys a
+// savings of 6 in the inliner's cost model.
+return len(s), nil
}
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+s string
+cap int
+}
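
The sliceHeader trick above is purely a performance move: the string helpers must keep returning exactly the same digests as their []byte counterparts, just without the copy and without defeating the inliner. A sketch checking that equivalence, with the v2 module path assumed:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	s := "the quick brown fox"

	// Sum64String avoids the []byte(s) copy but must match Sum64 exactly.
	fmt.Println(xxhash.Sum64String(s) == xxhash.Sum64([]byte(s))) // true

	// WriteString is the streaming equivalent; it always reports len(s), nil.
	d := xxhash.New()
	n, err := d.WriteString(s)
	fmt.Println(n == len(s), err == nil)            // true true
	fmt.Println(d.Sum64() == xxhash.Sum64String(s)) // true
}
```
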


@@ -22,8 +22,9 @@ import (
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/any"
extensions "github.com/googleapis/gnostic/extensions"
yaml "gopkg.in/yaml.v3" yaml "gopkg.in/yaml.v3"
extensions "github.com/google/gnostic/extensions"
) )
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. // ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.


@@ -20,8 +20,9 @@ import (
"sort" "sort"
"strconv" "strconv"
"github.com/googleapis/gnostic/jsonschema"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
"github.com/google/gnostic/jsonschema"
) )
// compiler helper functions, usually called from generated code // compiler helper functions, usually called from generated code


@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.15.5
+// protoc v3.18.1
// source: extensions/extension.proto
package gnostic_extension_v1

Some files were not shown because too many files have changed in this diff.