Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)

Compare commits: deschedule...release-1.

94 Commits
| SHA1 |
|---|
| c86d1c7eb2 |
| f67c265533 |
| 969921640c |
| 0273fd7597 |
| e84d0c5587 |
| 5267ec407c |
| 6714d8e0b7 |
| b3439eab41 |
| 509118587a |
| f482537dff |
| 0f95817746 |
| 70d1fadae7 |
| 499beb2fd7 |
| de24f3854b |
| a5e8ba1a70 |
| 45ad48042f |
| 550de966c7 |
| c94342db31 |
| 94f1c7dd8d |
| bf91e6790e |
| 251f44e568 |
| 922c4f6a63 |
| 2b5ec01381 |
| 7bcd562ff5 |
| 03852d0914 |
| 5f1e9a97c4 |
| cd6f2cd4cb |
| e679c7fabc |
| 6f5918d765 |
| 5a46ba0630 |
| c1323719f4 |
| 8795fe6b90 |
| a3f8bb0369 |
| cd3c3bf4da |
| 652ee87bf5 |
| 5225ec4597 |
| b2af720ddb |
| 94f07996f7 |
| 4839d5f369 |
| 022e07c278 |
| 620d71abdf |
| 85d00ab457 |
| b30bd40860 |
| 4108362158 |
| b27dc5f14e |
| 3c54378749 |
| 6240aa68f7 |
| 301af7fd9c |
| 41d529ebe2 |
| cc6bb633ba |
| 31cf70c34c |
| 3399619395 |
| cfc4cce08b |
| f9e9f0654a |
| 73af0e84fa |
| b33928ac91 |
| 3ac0c408de |
| 149f900811 |
| 9ede04ba9b |
| f9cbed8b71 |
| fa4da031e4 |
| 9511f308d0 |
| 52f43f0fcb |
| 4bb0ceeed5 |
| 279f648e9a |
| 411ec740ff |
| 6237ba5a43 |
| 5d65a9ad68 |
| 28f3f867c3 |
| 00f79aa28d |
| 6042d717e9 |
| 7afa54519f |
| 8c3a80fbf9 |
| 11b9829885 |
| 4798559545 |
| 8b34d6eb94 |
| 70700a1c97 |
| d7420eb945 |
| c9cfeb35c2 |
| fda63a816f |
| 6329b6c27b |
| 9b4f781c5c |
| 63039fcfd6 |
| d25f3757d6 |
| 1303fe6eb9 |
| 1682cc9462 |
| 605927676f |
| dc41e6a41c |
| e37c27313e |
| e5d9756ebe |
| e6f1c6f78a |
| fceebded6d |
| 08b2dffa42 |
| 50d2b246d9 |
15 .github/workflows/release.yaml (vendored)
@@ -11,20 +11,21 @@ jobs:
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Fetch history
        run: git fetch --prune --unshallow

      - name: Add dependency chart repos
        run: |
          helm repo add stable https://kubernetes-charts.storage.googleapis.com/

      - name: Install Helm
        uses: azure/setup-helm@v1
        with:
          version: v3.4.0

      - name: Run chart-releaser
-       uses: helm/chart-releaser-action@v1.0.0-rc.2
+       uses: helm/chart-releaser-action@v1.1.0
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+         CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"
@@ -11,11 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM golang:1.15.0
+FROM golang:1.15.5

WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
-RUN make
+ARG ARCH
+ARG VERSION
+RUN VERSION=${VERSION} make build.$ARCH

FROM scratch
41 Makefile
@@ -14,13 +14,13 @@

.PHONY: test

-# VERSION is currently based on the last commit
-VERSION?=$(shell git describe --tags --match "v*")
-COMMIT=$(shell git rev-parse HEAD)
+# VERSION is based on a date stamp plus the last commit
+VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags --match "v*")
BUILD=$(shell date +%FT%T%z)
LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
+ARCHS = amd64 arm64

-LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"
+LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD}"

GOLANGCI_VERSION := v1.30.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint)
@@ -48,23 +48,48 @@ all: build

build:
	CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler

+build.amd64:
+	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
+
+build.arm64:
+	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
+
dev-image: build
	docker build -f Dockerfile.dev -t $(IMAGE) .

image:
-	docker build -t $(IMAGE) .
+	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .

-push-container-to-gcloud: image
+image.amd64:
+	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
+
+image.arm64:
+	docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
+
+push: image
	gcloud auth configure-docker
	docker tag $(IMAGE) $(IMAGE_GCLOUD)
	docker push $(IMAGE_GCLOUD)

-push: push-container-to-gcloud
+push-all: image.amd64 image.arm64
+	gcloud auth configure-docker
+	for arch in $(ARCHS); do \
+		docker tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
+		docker push $(IMAGE_GCLOUD)-$${arch} ;\
+	done
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
+	for arch in $(ARCHS); do \
+		DOCKER_CLI_EXPERIMENTAL=enabled docker manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
+	done
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(IMAGE_GCLOUD) ;\

clean:
	rm -rf _output

-verify: verify-gofmt verify-vendor lint lint-chart
+verify: verify-gofmt verify-vendor lint lint-chart verify-spelling
+
+verify-spelling:
+	./hack/verify-spelling.sh

verify-gofmt:
	./hack/verify-gofmt.sh
220 README.md
@@ -29,6 +29,7 @@ Table of Contents
  * [Run As A Job](#run-as-a-job)
  * [Run As A CronJob](#run-as-a-cronjob)
  * [Install Using Helm](#install-using-helm)
+ * [Install Using Kustomize](#install-using-kustomize)
  * [User Guide](#user-guide)
* [Policy and Strategies](#policy-and-strategies)
  * [RemoveDuplicates](#removeduplicates)
@@ -36,6 +37,7 @@ Table of Contents
  * [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
  * [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
  * [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
+ * [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
  * [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
  * [PodLifeTime](#podlifetime)
  * [Filter Pods](#filter-pods)
@@ -59,17 +61,17 @@ being evicted by itself or by the kubelet.

### Run As A Job

```
-kubectl create -f kubernetes/rbac.yaml
-kubectl create -f kubernetes/configmap.yaml
-kubectl create -f kubernetes/job.yaml
+kubectl create -f kubernetes/base/rbac.yaml
+kubectl create -f kubernetes/base/configmap.yaml
+kubectl create -f kubernetes/job/job.yaml
```

### Run As A CronJob

```
-kubectl create -f kubernetes/rbac.yaml
-kubectl create -f kubernetes/configmap.yaml
-kubectl create -f kubernetes/cronjob.yaml
+kubectl create -f kubernetes/base/rbac.yaml
+kubectl create -f kubernetes/base/configmap.yaml
+kubectl create -f kubernetes/cronjob/cronjob.yaml
```
@@ -77,6 +79,23 @@ kubectl create -f kubernetes/cronjob.yaml

Starting with release v0.18.0 there is an official helm chart that can be used to install the
descheduler. See the [helm chart README](https://github.com/kubernetes-sigs/descheduler/blob/master/charts/descheduler/README.md) for detailed instructions.

+The descheduler helm chart is also listed on the [artifact hub](https://artifacthub.io/packages/helm/descheduler/descheduler).
+
+### Install Using Kustomize
+
+You can use kustomize to install descheduler.
+See the [resources | Kustomize](https://kubernetes-sigs.github.io/kustomize/api-reference/kustomization/resources/) for detailed instructions.
+
+Run As A Job
+```
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=master' | kubectl apply -f -
+```
+
+Run As A CronJob
+```
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=master' | kubectl apply -f -
+```
+
## User Guide

See the [user guide](docs/user-guide.md) in the `/docs` directory.
@@ -84,17 +103,17 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.

## Policy and Strategies

Descheduler's policy is configurable and includes strategies that can be enabled or disabled.
-Seven strategies `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`,
-`RemovePodsViolatingNodeAffinity`, `RemovePodsViolatingNodeTaints`, `RemovePodsHavingTooManyRestarts`, and `PodLifeTime`
-are currently implemented. As part of the policy, the parameters associated with the strategies can be configured too.
-By default, all strategies are enabled.
+Eight strategies `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`,
+`RemovePodsViolatingNodeAffinity`, `RemovePodsViolatingNodeTaints`, `RemovePodsViolatingTopologySpreadConstraint`,
+`RemovePodsHavingTooManyRestarts`, and `PodLifeTime` are currently implemented. As part of the policy, the
+parameters associated with the strategies can be configured too. By default, all strategies are enabled.

The policy also includes common configuration for all the strategies:
- `nodeSelector` - limiting the nodes which are processed
- `evictLocalStoragePods` - allowing to evict pods with local storage
- `maxNoOfPodsToEvictPerNode` - maximum number of pods evicted from each node (summed through all strategies)

-```
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
@@ -116,7 +135,17 @@ are ready again, this strategy could be enabled to evict those duplicate pods.

It provides one optional parameter, `ExcludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.

-```
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`excludeOwnerKinds`|list(string)|
+|`namespaces`|(see [namespace filtering](#namespace-filtering))|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+
+**Example:**
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
@@ -146,9 +175,20 @@ considered appropriately utilized and is not considered for eviction. The thresh
can be configured for cpu, memory, and number of pods too in terms of percentage.

These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
-Here is an example of a policy for this strategy:
-
-```
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`thresholds`|map(string:int)|
+|`targetThresholds`|map(string:int)|
+|`numberOfNodes`|int|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+
+**Example:**
+
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
@@ -186,15 +226,24 @@ This strategy makes sure that pods violating interpod anti-affinity are removed
if there is podA on a node and podB and podC (running on the same node) have anti-affinity rules which prohibit
them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This
issue could happen, when the anti-affinity rules for podB and podC are created when they are already running on
-node. Currently, there are no parameters associated with this strategy. To disable this strategy, the
-policy should look like:
+node.

-```
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+|`namespaces`|(see [namespace filtering](#namespace-filtering))|
+
+**Example:**
+
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingInterPodAntiAffinity":
-    enabled: false
+    enabled: true
```

### RemovePodsViolatingNodeAffinity
@@ -215,9 +264,18 @@ of scheduling. Over time nodeA stops to satisfy the rule. When the strategy gets
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.

-The policy file should look like:
-
-```
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`nodeAffinityType`|list(string)|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+|`namespaces`|(see [namespace filtering](#namespace-filtering))|
+
+**Example:**
+
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
@@ -233,9 +291,19 @@ strategies:

This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example there is a
pod "podA" with a toleration to tolerate a taint ``key=value:NoSchedule`` scheduled and running on the tainted
node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations
-and will be evicted. The policy file should look like:
-
-````
+and will be evicted.
+
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+|`namespaces`|(see [namespace filtering](#namespace-filtering))|
+
+**Example:**
+
+````yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
@@ -243,11 +311,51 @@ strategies:
    enabled: true
````

+### RemovePodsViolatingTopologySpreadConstraint
+
+This strategy makes sure that pods violating [topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
+are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
+This strategy requires k8s version 1.18 at a minimum.
+
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+|`namespaces`|(see [namespace filtering](#namespace-filtering))|
+
+**Example:**
+
+```yaml
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "RemovePodsViolatingTopologySpreadConstraint":
+     enabled: true
+```
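For reference, the constraint this strategy enforces is declared on the workload itself, not in the descheduler policy. A minimal sketch of such a constraint follows (the `zone` topology key and `app: my-app` selector are illustrative, not taken from this repository):

```yaml
# Illustrative pod spec fragment: the descheduler evicts pods when the actual
# skew across the "zone" domains exceeds maxSkew for pods matching the selector.
spec:
  topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: zone
    whenUnsatisfiable: DoNotSchedule
    labelSelector:
      matchLabels:
        app: my-app
```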
### RemovePodsHavingTooManyRestarts

-This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes.
+This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that
+can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters
+include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`,
+which determines whether init container restarts should be factored into that calculation.

-```
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`podRestartThreshold`|int|
+|`includingInitContainers`|bool|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+|`namespaces`|(see [namespace filtering](#namespace-filtering))|
+
+**Example:**
+
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
@@ -261,36 +369,60 @@ strategies:

### PodLifeTime

-This strategy evicts pods that are older than `.strategies.PodLifeTime.params.maxPodLifeTimeSeconds` The policy
-file should look like:
+This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.

-````
+You can also specify `podStatusPhases` to `only` evict pods with specific `StatusPhases`, currently this parameter is limited
+to `Running` and `Pending`.
+
+**Parameters:**
+
+|Name|Type|
+|---|---|
+|`maxPodLifeTimeSeconds`|int|
+|`podStatusPhases`|list(string)|
+|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
+|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
+|`namespaces`|(see [namespace filtering](#namespace-filtering))|
+
+**Example:**
+
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
-      maxPodLifeTimeSeconds: 86400
-````
+      podLifeTime:
+        maxPodLifeTimeSeconds: 86400
+      podStatusPhases:
+      - "Pending"
+```

## Filter Pods

### Namespace filtering

-Strategies like `PodLifeTime`, `RemovePodsHavingTooManyRestarts`, `RemovePodsViolatingNodeTaints`,
-`RemovePodsViolatingNodeAffinity` and `RemovePodsViolatingInterPodAntiAffinity` can specify `namespaces`
-parameter which allows to specify a list of including, resp. excluding namespaces.
-E.g.
+The following strategies accept a `namespaces` parameter which allows to specify a list of including, resp. excluding namespaces:
+* `PodLifeTime`
+* `RemovePodsHavingTooManyRestarts`
+* `RemovePodsViolatingNodeTaints`
+* `RemovePodsViolatingNodeAffinity`
+* `RemovePodsViolatingInterPodAntiAffinity`
+* `RemoveDuplicates`
+* `RemovePodsViolatingTopologySpreadConstraint`

-```
+For example:
+
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
-      maxPodLifeTimeSeconds: 86400
+      podLifeTime:
+        maxPodLifeTimeSeconds: 86400
      namespaces:
        include:
        - "namespace1"
@@ -300,14 +432,15 @@ strategies:

In the examples `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
The similar holds for `exclude` field:

-```
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
-      maxPodLifeTimeSeconds: 86400
+      podLifeTime:
+        maxPodLifeTimeSeconds: 86400
      namespaces:
        exclude:
        - "namespace1"
@@ -327,26 +460,28 @@ is set to the value of `system-cluster-critical` priority class.

E.g.

Setting `thresholdPriority`
-```
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
-      maxPodLifeTimeSeconds: 86400
+      podLifeTime:
+        maxPodLifeTimeSeconds: 86400
      thresholdPriority: 10000
```

Setting `thresholdPriorityClassName`
-```
+```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
-      maxPodLifeTimeSeconds: 86400
+      podLifeTime:
+        maxPodLifeTimeSeconds: 86400
      thresholdPriorityClassName: "priorityclass1"
```
@@ -361,7 +496,7 @@ When the descheduler decides to evict pods from a node, it employs the following
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Job are
  never evicted because these pods won't be recreated.
* Pods associated with DaemonSets are never evicted.
-* Pods with local storage are never evicted.
+* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set)
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
  best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are evicted. This
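The annotation in the last rule is set on the pod itself. A minimal sketch of a pod carrying it (pod name and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: evictable-pod                              # illustrative name
  annotations:
    # opts this pod in for descheduler eviction even if a rule above would protect it
    descheduler.alpha.kubernetes.io/evict: "true"
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.2                    # illustrative image
```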
@@ -386,6 +521,7 @@ packages that it is compiled with.

Descheduler | Supported Kubernetes Version
-------------|-----------------------------
+v0.20 | v1.20
v0.19 | v1.19
v0.18 | v1.18
v0.10 | v1.17
@@ -1,7 +1,7 @@
apiVersion: v1
-name: descheduler-helm-chart
-version: 0.19.0
-appVersion: 0.19.0
+name: descheduler
+version: 0.20.0
+appVersion: 0.20.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes
@@ -6,12 +6,12 @@

```shell
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
-helm install --name my-release descheduler/descheduler-helm-chart
+helm install my-release --namespace kube-system descheduler/descheduler
```

## Introduction

-This chart bootstraps a [desheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.

## Prerequisites
@@ -22,7 +22,7 @@ This chart bootstraps a [desheduler](https://github.com/kubernetes-sigs/deschedu

To install the chart with the release name `my-release`:

```shell
-helm install --name my-release descheduler/descheduler-helm-chart
+helm install --namespace kube-system my-release descheduler/descheduler
```

The command deploys _descheduler_ on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
@@ -43,17 +43,23 @@ The command removes all the Kubernetes components associated with the chart and

The following table lists the configurable parameters of the _descheduler_ chart and their default values.

-| Parameter | Description | Default |
-|---|---|---|
-| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
-| `image.tag` | Docker tag to use | `v[chart appVersion]` |
-| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
-| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
-| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
-| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
-| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
-| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
-| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
-| `rbac.create` | If `true`, create & use RBAC resources | `true` |
-| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
-| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
+| Parameter | Description | Default |
+|---|---|---|
+| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
+| `image.tag` | Docker tag to use | `v[chart appVersion]` |
+| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
+| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
+| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
+| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
+| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
+| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
+| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
+| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
+| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
+| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
+| `rbac.create` | If `true`, create & use RBAC resources | `true` |
+| `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
+| `resources.cpuRequest` | Descheduler container CPU request | `500m` |
+| `resources.memoryRequest` | Descheduler container memory request | `256Mi` |
+| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
+| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
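As one illustration of the new parameters above, a values override enabling the CronJob history settings could look like the sketch below (the file name and the specific values are illustrative; apply it with `helm install my-release -f values-override.yaml descheduler/descheduler`):

```yaml
# values-override.yaml -- illustrative overrides for the chart parameters listed above
schedule: "*/10 * * * *"
startingDeadlineSeconds: 200
successfulJobsHistoryLimit: 1
failedJobsHistoryLimit: 1
resources:
  requests:
    cpu: 500m
    memory: 256Mi
```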
@@ -12,6 +12,9 @@ rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "watch", "list"]
+- apiGroups: [""]
+  resources: ["namespaces"]
+  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete"]
@@ -21,4 +24,11 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
  resources: ["priorityclasses"]
  verbs: ["get", "watch", "list"]
+{{- if .Values.podSecurityPolicy.create }}
+- apiGroups: ['policy']
+  resources: ['podsecuritypolicies']
+  verbs: ['use']
+  resourceNames:
+  - {{ template "descheduler.fullname" . }}
+{{- end }}
{{- end -}}
@@ -7,6 +7,15 @@ metadata:
spec:
  schedule: {{ .Values.schedule | quote }}
  concurrencyPolicy: "Forbid"
+  {{- if .Values.startingDeadlineSeconds }}
+  startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
+  {{- end }}
+  {{- if .Values.successfulJobsHistoryLimit }}
+  successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
+  {{- end }}
+  {{- if .Values.failedJobsHistoryLimit }}
+  failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
+  {{- end }}
  jobTemplate:
    spec:
      template:
@@ -44,6 +53,8 @@ spec:
            - {{ $value | quote }}
            {{- end }}
            {{- end }}
+          resources:
+            {{- toYaml .Values.resources | nindent 16 }}
          volumeMounts:
          - mountPath: /policy-dir
            name: policy-volume
38 charts/descheduler/templates/podsecuritypolicy.yaml (new file)
@@ -0,0 +1,38 @@
+{{- if .Values.podSecurityPolicy.create -}}
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: {{ template "descheduler.fullname" . }}
+  annotations:
+    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
+    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  requiredDropCapabilities:
+    - ALL
+  volumes:
+    - 'configMap'
+    - 'secret'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+      - min: 1
+        max: 65535
+  readOnlyRootFilesystem: true
+{{- end -}}
@@ -8,10 +8,21 @@ image:
  tag: ""
  pullPolicy: IfNotPresent

+resources:
+  requests:
+    cpu: 500m
+    memory: 256Mi
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+
nameOverride: ""
fullnameOverride: ""

schedule: "*/2 * * * *"
+#startingDeadlineSeconds: 200
+#successfulJobsHistoryLimit: 1
+#failedJobsHistoryLimit: 1

cmdOptions:
  v: 3
@@ -51,6 +62,10 @@ rbac:
  # Specifies whether RBAC resources should be created
  create: true

+podSecurityPolicy:
+  # Specifies whether PodSecurityPolicy should be created.
+  create: true
+
serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
@@ -14,7 +14,7 @@ steps:
  - VERSION=$_GIT_TAG
  - BASE_REF=$_PULL_BASE_REF
  args:
-  - push
+  - push-all
substitutions:
  # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
  # can be used as a substitution
@@ -18,36 +18,55 @@ limitations under the License.

package options

import (
-	"github.com/spf13/pflag"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"

	// install the componentconfig api so we get its defaulting and conversion functions
+	"k8s.io/component-base/logs"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
	deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
+
+	"github.com/spf13/pflag"
)

// DeschedulerServer configuration
type DeschedulerServer struct {
	componentconfig.DeschedulerConfiguration

	Client clientset.Interface
+	Logs   *logs.Options
}

// NewDeschedulerServer creates a new DeschedulerServer with default parameters
-func NewDeschedulerServer() *DeschedulerServer {
-	versioned := v1alpha1.DeschedulerConfiguration{}
-	deschedulerscheme.Scheme.Default(&versioned)
-	cfg := componentconfig.DeschedulerConfiguration{}
-	deschedulerscheme.Scheme.Convert(versioned, &cfg, nil)
-	s := DeschedulerServer{
-		DeschedulerConfiguration: cfg,
+func NewDeschedulerServer() (*DeschedulerServer, error) {
+	cfg, err := newDefaultComponentConfig()
+	if err != nil {
+		return nil, err
	}
-	return &s
+	return &DeschedulerServer{
+		DeschedulerConfiguration: *cfg,
+		Logs:                     logs.NewOptions(),
+	}, nil
}

+// Validation checks for DeschedulerServer.
+func (s *DeschedulerServer) Validate() error {
+	var errs []error
+	errs = append(errs, s.Logs.Validate()...)
+	return utilerrors.NewAggregate(errs)
+}
+
+func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
+	versionedCfg := v1alpha1.DeschedulerConfiguration{}
+	deschedulerscheme.Scheme.Default(&versionedCfg)
+	cfg := componentconfig.DeschedulerConfiguration{}
+	if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
+		return nil, err
+	}
+	return &cfg, nil
+}
+
// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
+	fs.StringVar(&rs.Logging.Format, "logging-format", rs.Logging.Format, `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
	fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
	fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
@@ -27,28 +27,35 @@ import (
	"github.com/spf13/cobra"

	aflag "k8s.io/component-base/cli/flag"
-	"k8s.io/component-base/logs"
	"k8s.io/klog/v2"
)

// NewDeschedulerCommand creates a *cobra.Command object with default parameters
func NewDeschedulerCommand(out io.Writer) *cobra.Command {
-	s := options.NewDeschedulerServer()
+	s, err := options.NewDeschedulerServer()
+
+	if err != nil {
+		klog.ErrorS(err, "unable to initialize server")
+	}
+
	cmd := &cobra.Command{
		Use:   "descheduler",
		Short: "descheduler",
		Long:  `The descheduler evicts pods which may be bound to less desired nodes`,
		Run: func(cmd *cobra.Command, args []string) {
-			logs.InitLogs()
-			defer logs.FlushLogs()
+			s.Logs.LogFormat = s.Logging.Format
+			s.Logs.Apply()
+
+			if err := s.Validate(); err != nil {
+				klog.ErrorS(err, "failed to validate server configuration")
+			}
+
			err := Run(s)
			if err != nil {
-				klog.Errorf("%v", err)
+				klog.ErrorS(err, "descheduler server")
			}
		},
	}
-	cmd.SetOutput(out)
+
+	cmd.SetOut(out)
	flags := cmd.Flags()
	flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
	flags.AddGoFlagSet(flag.CommandLine)
@@ -18,6 +18,7 @@ package app

import (
	"fmt"
+	"regexp"
	"runtime"
	"strings"

@@ -25,9 +26,6 @@ import (
)

var (
-	// gitCommit is a constant representing the source version that
-	// generated this build. It should be set during build via -ldflags.
-	gitCommit string
	// version is a constant representing the version tag that
	// generated this build. It should be set during build via -ldflags.
	version string
@@ -40,7 +38,6 @@ var (
type Info struct {
	Major      string `json:"major"`
	Minor      string `json:"minor"`
-	GitCommit  string `json:"gitCommit"`
	GitVersion string `json:"gitVersion"`
	BuildDate  string `json:"buildDate"`
	GoVersion  string `json:"goVersion"`
@@ -55,7 +52,6 @@ func Get() Info {
	return Info{
		Major:      majorVersion,
		Minor:      minorVersion,
-		GitCommit:  gitCommit,
		GitVersion: version,
		BuildDate:  buildDate,
		GoVersion:  runtime.Version(),
@@ -81,7 +77,18 @@ func splitVersion(version string) (string, string) {
	if version == "" {
		return "", ""
	}
-	// A sample version would be of form v0.1.0-7-ge884046, so split at first '.' and
-	// then return 0 and 1+(+ appended to follow semver convention) for major and minor versions.
-	return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
+
+	// Version from an automated container build environment for a tag. For example v20200521-v0.18.0.
+	m1, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+$`, version)
+
+	// Version from an automated container build environment(not a tag) or a local build. For example v20201009-v0.18.0-46-g939c1c0.
+	m2, _ := regexp.MatchString(`^v\d{8}-v\d+\.\d+\.\d+-\w+-\w+$`, version)
+
+	if m1 || m2 {
+		semVer := strings.Split(version, "-")[1]
+		return strings.Trim(strings.Split(semVer, ".")[0], "v"), strings.Split(semVer, ".")[1] + "+"
+	}
+
+	// Something went wrong
+	return "", ""
}
@@ -17,10 +17,9 @@ limitations under the License.

package main

import (
-	"flag"
	"fmt"
+	"k8s.io/component-base/logs"
	"os"

	"sigs.k8s.io/descheduler/cmd/descheduler/app"
)

@@ -28,7 +27,10 @@ func main() {
	out := os.Stdout
	cmd := app.NewDeschedulerCommand(out)
	cmd.AddCommand(app.NewVersionCommand())
-	flag.CommandLine.Parse([]string{})
+
+	logs.InitLogs()
+	defer logs.FlushLogs()

	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
@@ -6,7 +6,7 @@
- [Go 1.15+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
-- [kind](https://kind.sigs.k8s.io/)
+- [kind v0.9.0+](https://kind.sigs.k8s.io/)

## Build and Run
@@ -20,7 +20,7 @@
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
-6. Build and push the container image to the staging registry `VERSION=$VERSION make push`
+6. Build and push the container image to the staging registry `VERSION=$VERSION make push-all`
7. Publish a draft release using the tag you just created
8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
9. Publish release
@@ -3,6 +3,16 @@

Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.
* `k8s.gcr.io/descheduler/descheduler`

+Also, starting with descheduler release v0.20.0 multi-arch container images are provided. Currently AMD64 and ARM64
+container images are provided. Multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from
+a registry. Therefore starting with descheduler release v0.20.0 use the below process to download the official descheduler
+image into a kind cluster.
+```
+kind create cluster
+docker pull k8s.gcr.io/descheduler/descheduler:v0.20.0
+kind load docker-image k8s.gcr.io/descheduler/descheduler:v0.20.0
+```
+
## Policy Configuration Examples
The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.

@@ -69,24 +79,35 @@ This policy configuration file ensures that pods created more than 7 days ago ar

apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
-  "LowNodeUtilization":
-    enabled: false
-  "RemoveDuplicates":
-    enabled: false
-  "RemovePodsViolatingInterPodAntiAffinity":
-    enabled: false
-  "RemovePodsViolatingNodeAffinity":
-    enabled: false
-  "RemovePodsViolatingNodeTaints":
-    enabled: false
-  "RemovePodsHavingTooManyRestarts":
-    enabled: false
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
```

+### Balance Cluster By Node Memory Utilization
+
+If your cluster has been running for a long period of time, you may find that the resource utilization is not very
+balanced. The `LowNodeUtilization` strategy can be used to rebalance your cluster based on `cpu`, `memory`
+or `number of pods`.
+
+Using the following policy configuration file, descheduler will rebalance the cluster based on memory by evicting pods
+from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
+
+```
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "LowNodeUtilization":
+    enabled: true
+    params:
+      nodeResourceUtilizationThresholds:
+        thresholds:
+          "memory": 20
+        targetThresholds:
+          "memory": 70
+```
+
### Autoheal Node Problems
Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
12 examples/low-node-utilization.yml (new file)
@@ -0,0 +1,12 @@
+---
+apiVersion: "descheduler/v1alpha1"
+kind: "DeschedulerPolicy"
+strategies:
+  "LowNodeUtilization":
+    enabled: true
+    params:
+      nodeResourceUtilizationThresholds:
+        thresholds:
+          "memory": 20
+        targetThresholds:
+          "memory": 70
@@ -2,19 +2,8 @@

apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
-  "LowNodeUtilization":
-    enabled: false
-  "RemoveDuplicates":
-    enabled: false
-  "RemovePodsViolatingInterPodAntiAffinity":
-    enabled: false
-  "RemovePodsViolatingNodeAffinity":
-    enabled: false
-  "RemovePodsViolatingNodeTaints":
-    enabled: false
-  "RemovePodsHavingTooManyRestarts":
-    enabled: false
  "PodLifeTime":
    enabled: true
    params:
-      maxPodLifeTimeSeconds: 604800 # 7 days
+      podLifeTime:
+        maxPodLifeTimeSeconds: 604800 # 7 days
16 go.mod
@@ -3,13 +3,15 @@ module sigs.k8s.io/descheduler

go 1.15

require (
	github.com/client9/misspell v0.3.4
	github.com/spf13/cobra v0.0.5
	github.com/spf13/pflag v1.0.5
-	k8s.io/api v0.19.0
-	k8s.io/apimachinery v0.19.0
-	k8s.io/apiserver v0.19.0
-	k8s.io/client-go v0.19.0
-	k8s.io/code-generator v0.19.0
-	k8s.io/component-base v0.19.0
-	k8s.io/klog/v2 v2.2.0
+	k8s.io/api v0.20.0
+	k8s.io/apimachinery v0.20.0
+	k8s.io/apiserver v0.20.0
+	k8s.io/client-go v0.20.0
+	k8s.io/code-generator v0.20.0
+	k8s.io/component-base v0.20.0
+	k8s.io/component-helpers v0.20.0
+	k8s.io/klog/v2 v2.4.0
)
213 go.sum
@@ -6,42 +6,45 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0=
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -49,16 +52,18 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -90,13 +95,17 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
@@ -105,18 +114,14 @@ github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
@@ -125,20 +130,26 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
@@ -147,6 +158,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -155,6 +168,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@@ -162,16 +178,20 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
@@ -210,7 +230,6 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
|
||||
@@ -262,6 +281,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
@@ -289,6 +309,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
@@ -298,10 +320,11 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
@@ -316,14 +339,20 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@@ -332,12 +361,16 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
@@ -355,20 +388,27 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -390,32 +430,43 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8=
|
||||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@@ -433,20 +484,40 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8=
|
||||
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
@@ -462,14 +533,25 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -480,6 +562,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
@@ -504,6 +588,8 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
@@ -511,33 +597,40 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
k8s.io/api v0.19.0 h1:XyrFIJqTYZJ2DU7FBE/bSPz7b1HvbVBuBf07oeo6eTc=
|
||||
k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw=
|
||||
k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ=
|
||||
k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
|
||||
k8s.io/apiserver v0.19.0 h1:jLhrL06wGAADbLUUQm8glSLnAGP6c7y5R3p19grkBoY=
|
||||
k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk=
|
||||
k8s.io/client-go v0.19.0 h1:1+0E0zfWFIWeyRhQYWzimJOyAk2UT7TiARaLNwJCf7k=
|
||||
k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=
|
||||
k8s.io/code-generator v0.19.0 h1:r0BxYnttP/r8uyKd4+Njg0B57kKi8wLvwEzaaVy3iZ8=
|
||||
k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
|
||||
k8s.io/component-base v0.19.0 h1:OueXf1q3RW7NlLlUCj2Dimwt7E1ys6ZqRnq53l2YuoE=
|
||||
k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI=
|
||||
k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg=
|
||||
k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8=
|
||||
k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apiserver v0.20.0 h1:0MwO4xCoqZwhoLbFyyBSJdu55CScp4V4sAgX6z4oPBY=
|
||||
k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8=
|
||||
k8s.io/client-go v0.20.0 h1:Xlax8PKbZsjX4gFvNtt4F5MoJ1V5prDvCuoq9B7iax0=
|
||||
k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY=
|
||||
k8s.io/code-generator v0.20.0 h1:c8JaABvEEZPDE8MICTOtveHX2axchl+EptM+o4OGvbg=
|
||||
k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
|
||||
k8s.io/component-base v0.20.0 h1:BXGL8iitIQD+0NgW49UsM7MraNUUGDU3FBmrfUAtmVQ=
|
||||
k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA=
|
||||
k8s.io/component-helpers v0.20.0 h1:7Zi1fcb5nV0h03d9eeZGk71+ZWYvAN4Be+xMOZyFerc=
|
||||
k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 h1:t4L10Qfx/p7ASH3gXCdIUtPbbIuegCoUJf3TMSFekjw=
|
||||
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=
|
||||
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
||||
6
hack/.spelling_failures
Normal file
@@ -0,0 +1,6 @@
BUILD
CHANGELOG
OWNERS
go.mod
go.sum
vendor/

@@ -1,5 +1,5 @@
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker

@@ -19,4 +19,7 @@ limitations under the License.
// This package imports things required by build scripts, to force `go mod` to see them as dependencies
package tools

import _ "k8s.io/code-generator"
import (
	_ "github.com/client9/misspell/cmd/misspell"
	_ "k8s.io/code-generator"
)
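An editorial aside on the hunk above: blank-importing command packages inside a dedicated `tools` package is the conventional Go way to make `go mod` treat build-only tools such as misspell as real dependencies, so they stay pinned in go.mod/go.sum and survive `go mod tidy`. A minimal, hedged sketch of the same pattern guarded by a build constraint — a common variant, not necessarily how this repository structures the file:

// +build tools

// Package tools records build-time dependencies for `go mod`; the build
// tag above keeps these blank imports out of ordinary builds.
package tools

import (
	_ "github.com/client9/misspell/cmd/misspell" // spell checker driven by hack/verify-spelling.sh
	_ "k8s.io/code-generator"                    // deepcopy/conversion code generators
)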

41
hack/verify-spelling.sh
Executable file
@@ -0,0 +1,41 @@
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script checks for commonly misspelled English words in all files in
# the working directory using the client9/misspell package.
# Usage: `hack/verify-spelling.sh`.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
export KUBE_ROOT
source "${KUBE_ROOT}/hack/lib/init.sh"

# Ensure that we find the binaries we build before anything else.
export GOBIN="${OS_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"

# Install the tools we need.
pushd "${KUBE_ROOT}" >/dev/null
GO111MODULE=on go install github.com/client9/misspell/cmd/misspell
popd >/dev/null

# Spell checking.
# All files to skip are listed in hack/.spelling_failures.
skipping_file="${KUBE_ROOT}/hack/.spelling_failures"
failing_packages=$(sed "s| | -e |g" "${skipping_file}")
git ls-files | grep -v -e "${failing_packages}" | xargs misspell -i "Creater,creater,ect" -error -o stderr

6
kubernetes/base/kustomization.yaml
Normal file
@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- configmap.yaml
- rbac.yaml

@@ -3,7 +3,6 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: descheduler-cluster-role
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["events"]
@@ -11,6 +10,9 @@ rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "watch", "list"]
- apiGroups: [""]
  resources: ["namespaces"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete"]

@@ -16,7 +16,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: k8s.gcr.io/descheduler/descheduler:v0.19.0
          image: k8s.gcr.io/descheduler/descheduler:v0.20.0
          volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume
@@ -27,6 +27,10 @@ spec:
            - "/policy-dir/policy.yaml"
            - "--v"
            - "3"
          resources:
            requests:
              cpu: "500m"
              memory: "256Mi"
      restartPolicy: "Never"
      serviceAccountName: descheduler-sa
      volumes:

6
kubernetes/cronjob/kustomization.yaml
Normal file
@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- ../base
- cronjob.yaml

@@ -14,7 +14,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: k8s.gcr.io/descheduler/descheduler:v0.19.0
          image: k8s.gcr.io/descheduler/descheduler:v0.20.0
          volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume
@@ -25,6 +25,10 @@ spec:
            - "/policy-dir/policy.yaml"
            - "--v"
            - "3"
          resources:
            requests:
              cpu: "500m"
              memory: "256Mi"
      restartPolicy: "Never"
      serviceAccountName: descheduler-sa
      volumes:

6
kubernetes/job/kustomization.yaml
Normal file
@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- ../base
- job.yaml

@@ -67,7 +67,7 @@ type StrategyParameters struct {
	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
	NodeAffinityType                  []string
	PodsHavingTooManyRestarts         *PodsHavingTooManyRestarts
	MaxPodLifeTimeSeconds             *uint
	PodLifeTime                       *PodLifeTime
	RemoveDuplicates                  *RemoveDuplicates
	Namespaces                        *Namespaces
	ThresholdPriority                 *int32
@@ -91,3 +91,8 @@ type PodsHavingTooManyRestarts struct {
type RemoveDuplicates struct {
	ExcludeOwnerKinds []string
}

type PodLifeTime struct {
	MaxPodLifeTimeSeconds *uint
	PodStatusPhases       []string
}
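To make the shape of the new field concrete, here is a minimal sketch of constructing these parameters programmatically. Hedged assumptions: the import path sigs.k8s.io/descheduler/pkg/api is inferred from the repository layout, and "Running" mirrors the Kubernetes pod-phase strings.

package main

import (
	"fmt"

	api "sigs.k8s.io/descheduler/pkg/api" // assumed import path
)

func main() {
	// Make pods that have been Running for more than one hour eviction candidates.
	maxLifeTime := uint(3600)
	params := api.StrategyParameters{
		PodLifeTime: &api.PodLifeTime{
			MaxPodLifeTimeSeconds: &maxLifeTime,
			PodStatusPhases:       []string{"Running"},
		},
	}
	fmt.Printf("max lifetime: %ds, phases: %v\n",
		*params.PodLifeTime.MaxPodLifeTimeSeconds, params.PodLifeTime.PodStatusPhases)
}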

@@ -65,7 +65,7 @@ type StrategyParameters struct {
	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
	NodeAffinityType                  []string                           `json:"nodeAffinityType,omitempty"`
	PodsHavingTooManyRestarts         *PodsHavingTooManyRestarts         `json:"podsHavingTooManyRestarts,omitempty"`
	MaxPodLifeTimeSeconds             *uint                              `json:"maxPodLifeTimeSeconds,omitempty"`
	PodLifeTime                       *PodLifeTime                       `json:"podLifeTime,omitempty"`
	RemoveDuplicates                  *RemoveDuplicates                  `json:"removeDuplicates,omitempty"`
	Namespaces                        *Namespaces                        `json:"namespaces"`
	ThresholdPriority                 *int32                             `json:"thresholdPriority"`
@@ -89,3 +89,8 @@ type PodsHavingTooManyRestarts struct {
type RemoveDuplicates struct {
	ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
}

type PodLifeTime struct {
	MaxPodLifeTimeSeconds *uint    `json:"maxPodLifeTimeSeconds,omitempty"`
	PodStatusPhases       []string `json:"podStatusPhases,omitempty"`
}
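The JSON tags above define the on-the-wire shape of the new parameter. A self-contained sketch of decoding a policy fragment, relying only on encoding/json and the struct tags shown in the hunk (the local type definitions simply mirror them):

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the v1alpha1 fields above, trimmed to the new parameter.
type PodLifeTime struct {
	MaxPodLifeTimeSeconds *uint    `json:"maxPodLifeTimeSeconds,omitempty"`
	PodStatusPhases       []string `json:"podStatusPhases,omitempty"`
}

type StrategyParameters struct {
	PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
}

func main() {
	raw := []byte(`{"podLifeTime":{"maxPodLifeTimeSeconds":86400,"podStatusPhases":["Pending"]}}`)
	var params StrategyParameters
	if err := json.Unmarshal(raw, &params); err != nil {
		panic(err)
	}
	fmt.Println(*params.PodLifeTime.MaxPodLifeTimeSeconds, params.PodLifeTime.PodStatusPhases)
}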

@@ -75,6 +75,16 @@ func RegisterConversions(s *runtime.Scheme) error {
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*PodLifeTime)(nil), (*api.PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(a.(*PodLifeTime), b.(*api.PodLifeTime), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*api.PodLifeTime)(nil), (*PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(a.(*api.PodLifeTime), b.(*PodLifeTime), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*PodsHavingTooManyRestarts)(nil), (*api.PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(a.(*PodsHavingTooManyRestarts), b.(*api.PodsHavingTooManyRestarts), scope)
	}); err != nil {
@@ -204,6 +214,28 @@ func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtili
	return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
}

func autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
	return nil
}

// Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime is an autogenerated conversion function.
func Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
	return autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in, out, s)
}

func autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
	return nil
}

// Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime is an autogenerated conversion function.
func Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
	return autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in, out, s)
}

func autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
	out.PodRestartThreshold = in.PodRestartThreshold
	out.IncludingInitContainers = in.IncludingInitContainers
@@ -250,7 +282,7 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
	out.NodeResourceUtilizationThresholds = (*api.NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
	out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
	out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
	out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
	out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
@@ -267,7 +299,7 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
	out.NodeResourceUtilizationThresholds = (*NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
	out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
	out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
	out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
	out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
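Why the generated converters can cast through unsafe.Pointer instead of copying field by field: the versioned and internal PodLifeTime structs have identical field types and layout, so a pointer reinterpretation is sound and free. A toy illustration of the same trick with hypothetical types (not the repository's code):

package main

import (
	"fmt"
	"unsafe"
)

// Two structurally identical types standing in for the versioned and
// internal API structs.
type external struct{ Max *uint }
type internal struct{ Max *uint }

func main() {
	n := uint(42)
	e := external{Max: &n}
	// Valid only because the layouts match exactly, which the conversion
	// generator verifies before emitting such casts.
	i := *(*internal)(unsafe.Pointer(&e))
	fmt.Println(*i.Max) // 42
}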

@@ -148,6 +148,32 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
	*out = *in
	if in.MaxPodLifeTimeSeconds != nil {
		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
		*out = new(uint)
		**out = **in
	}
	if in.PodStatusPhases != nil {
		in, out := &in.PodStatusPhases, &out.PodStatusPhases
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
	if in == nil {
		return nil
	}
	out := new(PodLifeTime)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
	*out = *in
@@ -247,10 +273,10 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
		*out = new(PodsHavingTooManyRestarts)
		**out = **in
	}
	if in.MaxPodLifeTimeSeconds != nil {
		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
		*out = new(uint)
		**out = **in
	if in.PodLifeTime != nil {
		in, out := &in.PodLifeTime, &out.PodLifeTime
		*out = new(PodLifeTime)
		(*in).DeepCopyInto(*out)
	}
	if in.RemoveDuplicates != nil {
		in, out := &in.RemoveDuplicates, &out.RemoveDuplicates

@@ -148,6 +148,32 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
	*out = *in
	if in.MaxPodLifeTimeSeconds != nil {
		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
		*out = new(uint)
		**out = **in
	}
	if in.PodStatusPhases != nil {
		in, out := &in.PodStatusPhases, &out.PodStatusPhases
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
	if in == nil {
		return nil
	}
	out := new(PodLifeTime)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
	*out = *in
@@ -247,10 +273,10 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
		*out = new(PodsHavingTooManyRestarts)
		**out = **in
	}
	if in.MaxPodLifeTimeSeconds != nil {
		in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
		*out = new(uint)
		**out = **in
	if in.PodLifeTime != nil {
		in, out := &in.PodLifeTime, &out.PodLifeTime
		*out = new(PodLifeTime)
		(*in).DeepCopyInto(*out)
	}
	if in.RemoveDuplicates != nil {
		in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
@@ -20,6 +20,7 @@ import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	componentbaseconfig "k8s.io/component-base/config"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -48,4 +49,8 @@ type DeschedulerConfiguration struct {

	// EvictLocalStoragePods allows pods using local storage to be evicted.
	EvictLocalStoragePods bool

	// Logging specifies the logging options.
	// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
	Logging componentbaseconfig.LoggingConfiguration
}
|
||||
|
||||
@@ -70,13 +70,14 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
	sharedInformerFactory.WaitForCacheSync(stopChannel)

	strategyFuncs := map[string]strategyFunction{
		"RemoveDuplicates": strategies.RemoveDuplicatePods,
		"LowNodeUtilization": strategies.LowNodeUtilization,
		"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
		"RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity,
		"RemovePodsViolatingNodeTaints": strategies.RemovePodsViolatingNodeTaints,
		"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
		"PodLifeTime": strategies.PodLifeTime,
		"RemoveDuplicates": strategies.RemoveDuplicatePods,
		"LowNodeUtilization": strategies.LowNodeUtilization,
		"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
		"RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity,
		"RemovePodsViolatingNodeTaints": strategies.RemovePodsViolatingNodeTaints,
		"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
		"PodLifeTime": strategies.PodLifeTime,
		"RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint,
	}

	nodeSelector := rs.NodeSelector
@@ -97,13 +98,13 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
	wait.Until(func() {
		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector, stopChannel)
		if err != nil {
			klog.V(1).Infof("Unable to get ready nodes: %v", err)
			klog.V(1).InfoS("Unable to get ready nodes", "err", err)
			close(stopChannel)
			return
		}

		if len(nodes) <= 1 {
			klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
			klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
			close(stopChannel)
			return
		}

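For orientation, entries in strategyFuncs are dispatched roughly as in the sketch below. The surrounding loop is paraphrased from the descheduler run loop rather than quoted from this diff, so treat the policy field names as assumptions:

	// Paraphrased dispatch sketch (assumed shape of the policy types):
	for name, strategy := range deschedulerPolicy.Strategies {
		if f, ok := strategyFuncs[string(name)]; ok && strategy.Enabled {
			f(ctx, rs.Client, strategy, nodes, podEvictor)
		}
	}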
@@ -38,7 +38,10 @@ func TestTaintsUpdated(t *testing.T) {
	stopChannel := make(chan struct{})
	defer close(stopChannel)

	rs := options.NewDeschedulerServer()
	rs, err := options.NewDeschedulerServer()
	if err != nil {
		t.Fatalf("Unable to initialize server: %v", err)
	}
	rs.Client = client
	rs.DeschedulingInterval = 100 * time.Millisecond
	go func() {

@@ -106,17 +106,17 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
	err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
	if err != nil {
		// err is used only for logging purposes
		klog.Errorf("Error evicting pod: %#v in namespace %#v%s: %#v", pod.Name, pod.Namespace, reason, err)
		klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
		return false, nil
	}

	pe.nodepodCount[node]++
	if pe.dryRun {
		klog.V(1).Infof("Evicted pod in dry run mode: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
		klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
	} else {
		klog.V(1).Infof("Evicted pod: %#v in namespace %#v%s", pod.Name, pod.Namespace, reason)
		klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason)
		eventBroadcaster := record.NewBroadcaster()
		eventBroadcaster.StartLogging(klog.V(3).Infof)
		eventBroadcaster.StartStructuredLogging(3)
		eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
		r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
		r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
@@ -227,7 +227,7 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
	}

	if len(checkErrs) > 0 && !HaveEvictAnnotation(pod) {
		klog.V(4).Infof("Pod %s in namespace %s is not evictable: Pod lacks an eviction annotation and fails the following checks: %v", pod.Name, pod.Namespace, errors.NewAggregate(checkErrs).Error())
		klog.V(4).InfoS("Pod lacks an eviction annotation and fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
		return false
	}
	return true

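The pattern repeated across these hunks is the klog structured-logging migration: a constant message plus alternating key/value pairs replaces format verbs, and the error moves into ErrorS's first argument. In short:

	klog.V(1).Infof("Evicted pod: %#v in namespace %#v", pod.Name, pod.Namespace) // before
	klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod))                         // after
	klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))                 // errors go first in ErrorS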
@@ -18,7 +18,8 @@ package node

import (
	"context"
	"k8s.io/api/core/v1"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	coreinformers "k8s.io/client-go/informers/core/v1"
@@ -42,7 +43,7 @@ func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer co
	}

	if len(nodes) == 0 {
		klog.V(2).Infof("node lister returned empty list, now fetch directly")
		klog.V(2).InfoS("Node lister returned empty list, now fetch directly")

		nItems, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: nodeSelector})
		if err != nil {
@@ -77,19 +78,19 @@ func IsReady(node *v1.Node) bool {
		// - NodeOutOfDisk condition status is ConditionFalse,
		// - NodeNetworkUnavailable condition status is ConditionFalse.
		if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
			klog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
			klog.V(1).InfoS("Ignoring node", "node", klog.KObj(node), "condition", cond.Type, "status", cond.Status)
			return false
		} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
			klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
			klog.V(4).InfoS("Ignoring node with condition status", "node", klog.KObj(node.Name), "condition", cond.Type, "status", cond.Status)
			return false
		} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
			klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
			klog.V(4).InfoS("Ignoring node with condition status", "node", klog.KObj(node.Name), "condition", cond.Type, "status", cond.Status)
			return false
		}*/
	}
	// Ignore nodes that are marked unschedulable
	/*if node.Spec.Unschedulable {
		klog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
		klog.V(4).InfoS("Ignoring node since it is unschedulable", "node", klog.KObj(node.Name))
		return false
	}*/
	return true
@@ -112,7 +113,7 @@ func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
			continue
		}
		if !IsNodeUnschedulable(node) {
			klog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
			klog.V(2).InfoS("Pod can possibly be scheduled on a different node", "pod", klog.KObj(pod), "node", klog.KObj(node))
			return true
		}
	}
@@ -125,15 +126,15 @@ func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
	ok, err := utils.PodMatchNodeSelector(pod, node)

	if err != nil {
		klog.Error(err)
		klog.ErrorS(err, "Failed to match node selector")
		return false
	}

	if !ok {
		klog.V(2).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
		klog.V(2).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
		return false
	}

	klog.V(2).Infof("Pod %v fits on node %v", pod.Name, node.Name)
	klog.V(2).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
	return true
}

@@ -108,7 +108,7 @@ func ListPodsOnANode(
	}

	// INFO(jchaloup): field selectors do not work properly with listers
	// Once the descheduler switcheds to pod listers (through informers),
	// Once the descheduler switches to pod listers (through informers),
	// We need to flip to client-side filtering.
	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
		metav1.ListOptions{FieldSelector: fieldSelector.String()})

@@ -30,7 +30,7 @@ import (

func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
	if policyConfigFile == "" {
		klog.V(1).Infof("policy config file not specified")
		klog.V(1).InfoS("Policy config file not specified")
		return nil, nil
	}

@@ -19,6 +19,7 @@ package strategies
import (
	"context"
	"fmt"
	"math"
	"reflect"
	"sort"
	"strings"
@@ -39,6 +40,10 @@ func validateRemoveDuplicatePodsParams(params *api.StrategyParameters) error {
	if params == nil {
		return nil
	}
	// At most one of include/exclude can be set
	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
	}
	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
		return fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
	}
@@ -46,6 +51,11 @@ func validateRemoveDuplicatePodsParams(params *api.StrategyParameters) error {
	return nil
}

type podOwner struct {
	namespace, kind, name string
	imagesHash string
}

// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
// namespace, and have at least one container with the same image.
@@ -58,26 +68,43 @@ func RemoveDuplicatePods(
	podEvictor *evictions.PodEvictor,
) {
	if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
		return
	}
	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
	if err != nil {
		klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
		return
	}

	var includedNamespaces, excludedNamespaces []string
	if strategy.Params != nil && strategy.Params.Namespaces != nil {
		includedNamespaces = strategy.Params.Namespaces.Include
		excludedNamespaces = strategy.Params.Namespaces.Exclude
	}

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

	duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
	ownerKeyOccurence := make(map[podOwner]int32)
	nodeCount := 0
	nodeMap := make(map[string]*v1.Node)

	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v", node.Name)
		pods, err := podutil.ListPodsOnANode(ctx, client, node, podutil.WithFilter(evictable.IsEvictable))
		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
		pods, err := podutil.ListPodsOnANode(ctx,
			client,
			node,
			podutil.WithFilter(evictable.IsEvictable),
			podutil.WithNamespaces(includedNamespaces),
			podutil.WithoutNamespaces(excludedNamespaces),
		)
		if err != nil {
			klog.Errorf("error listing evictable pods on node %s: %+v", node.Name, err)
			klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
			continue
		}

		duplicatePods := make([]*v1.Pod, 0, len(pods))
		nodeMap[node.Name] = node
		nodeCount++
		// Each pod has a list of owners and a list of containers, and each container has 1 image spec.
		// For each pod, we go through all the OwnerRef/Image mappings and represent them as a "key" string.
		// All of those mappings together makes a list of "key" strings that essentially represent that pod's uniqueness.
@@ -98,12 +125,25 @@ func RemoveDuplicatePods(
			continue
		}
		podContainerKeys := make([]string, 0, len(ownerRefList)*len(pod.Spec.Containers))
		imageList := []string{}
		for _, container := range pod.Spec.Containers {
			imageList = append(imageList, container.Image)
		}
		sort.Strings(imageList)
		imagesHash := strings.Join(imageList, "#")
		for _, ownerRef := range ownerRefList {
			for _, container := range pod.Spec.Containers {
				ownerKey := podOwner{
					namespace: pod.ObjectMeta.Namespace,
					kind: ownerRef.Kind,
					name: ownerRef.Name,
					imagesHash: imagesHash,
				}
				ownerKeyOccurence[ownerKey] = ownerKeyOccurence[ownerKey] + 1
				for _, image := range imageList {
					// Namespace/Kind/Name should be unique for the cluster.
					// We also consider the image, as 2 pods could have the same owner but serve different purposes
					// So any non-unique Namespace/Kind/Name/Image pattern is a duplicate pod.
					s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name, container.Image}, "/")
					s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name, image}, "/")
					podContainerKeys = append(podContainerKeys, s)
				}
			}
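To make the keying concrete: for a hypothetical pod in namespace dev, owned by ReplicaSet rs-1, with two containers running images foo and bar, the loops above would produce (all names here are illustrative, not from the diff):

	// imageList (sorted): ["bar", "foo"]; imagesHash: "bar#foo"
	// One ReplicaSet owner x two containers x two images yields:
	podContainerKeys := []string{
		"dev/ReplicaSet/rs-1/bar",
		"dev/ReplicaSet/rs-1/foo",
		"dev/ReplicaSet/rs-1/bar", // the image loop repeats once per container
		"dev/ReplicaSet/rs-1/foo",
	}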
@@ -115,7 +155,19 @@ func RemoveDuplicatePods(
			for _, keys := range existing {
				if reflect.DeepEqual(keys, podContainerKeys) {
					matched = true
					duplicatePods = append(duplicatePods, pod)
					klog.V(3).InfoS("Duplicate found", "pod", klog.KObj(pod))
					for _, ownerRef := range ownerRefList {
						ownerKey := podOwner{
							namespace: pod.ObjectMeta.Namespace,
							kind: ownerRef.Kind,
							name: ownerRef.Name,
							imagesHash: imagesHash,
						}
						if _, ok := duplicatePods[ownerKey]; !ok {
							duplicatePods[ownerKey] = make(map[string][]*v1.Pod)
						}
						duplicatePods[ownerKey][node.Name] = append(duplicatePods[ownerKey][node.Name], pod)
					}
					break
				}
			}
@@ -128,11 +180,23 @@ func RemoveDuplicatePods(
				duplicateKeysMap[podContainerKeys[0]] = [][]string{podContainerKeys}
			}
		}
	}

	for _, pod := range duplicatePods {
		if _, err := podEvictor.EvictPod(ctx, pod, node, "RemoveDuplicatePods"); err != nil {
			klog.Errorf("Error evicting pod: (%#v)", err)
			break
	// 1. how many pods can be evicted to respect uniform placement of pods among viable nodes?
	for ownerKey, nodes := range duplicatePods {
		upperAvg := int(math.Ceil(float64(ownerKeyOccurence[ownerKey]) / float64(nodeCount)))
		for nodeName, pods := range nodes {
			klog.V(2).InfoS("Average occurrence per node", "node", klog.KObj(nodeMap[nodeName]), "ownerKey", ownerKey, "avg", upperAvg)
			// list of duplicated pods does not contain the original referential pod
			if len(pods)+1 > upperAvg {
				// It's assumed all duplicated pods are in the same priority class
				// TODO(jchaloup): check if the pod has a different node to lend to
				for _, pod := range pods[upperAvg-1:] {
					if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[nodeName], "RemoveDuplicatePods"); err != nil {
						klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
						break
					}
				}
			}
		}
	}

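The arithmetic matches the test fixtures further down. A worked instance, assuming one owner with 9 pods spread (5,3,1) over 3 nodes:

	// ownerKeyOccurence[ownerKey] == 9, nodeCount == 3
	upperAvg := int(math.Ceil(9.0 / 3.0)) // 3
	// On the node holding 5 pods, the duplicates list has 4 entries (the
	// referential pod is excluded), so len(pods)+1 == 5 > 3 and
	// pods[upperAvg-1:] == pods[2:] evicts 2 pods; the other nodes stay put.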
@@ -20,8 +20,9 @@ import (
	"context"
	"testing"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
@@ -31,34 +32,45 @@ import (
	"sigs.k8s.io/descheduler/test"
)

func buildTestPodWithImage(podName, node, image string) *v1.Pod {
	pod := test.BuildTestPod(podName, 100, 0, node, test.SetRSOwnerRef)
	pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
		Name: image,
		Image: image,
	})
	return pod
}

func TestFindDuplicatePods(t *testing.T) {
	ctx := context.Background()
	// first setup pods
	node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
	p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
	node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)

	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
	p1.Namespace = "dev"
	p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
	p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
	p2.Namespace = "dev"
	p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
	p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
	p3.Namespace = "dev"
	p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
	p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
	p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
	p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
	p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
	p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
	p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
	p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
	p7.Namespace = "kube-system"
	p8 := test.BuildTestPod("p8", 100, 0, node.Name, nil)
	p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
	p8.Namespace = "test"
	p9 := test.BuildTestPod("p9", 100, 0, node.Name, nil)
	p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
	p9.Namespace = "test"
	p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
	p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
	p10.Namespace = "test"
	p11 := test.BuildTestPod("p11", 100, 0, node.Name, nil)
	p11 := test.BuildTestPod("p11", 100, 0, node1.Name, nil)
	p11.Namespace = "different-images"
	p12 := test.BuildTestPod("p12", 100, 0, node.Name, nil)
	p12 := test.BuildTestPod("p12", 100, 0, node1.Name, nil)
	p12.Namespace = "different-images"
	p13 := test.BuildTestPod("p13", 100, 0, node.Name, nil)
	p13 := test.BuildTestPod("p13", 100, 0, node1.Name, nil)
	p13.Namespace = "different-images"
	p14 := test.BuildTestPod("p14", 100, 0, node.Name, nil)
	p14 := test.BuildTestPod("p14", 100, 0, node1.Name, nil)
	p14.Namespace = "different-images"

	// ### Evictable Pods ###
@@ -121,10 +133,10 @@ func TestFindDuplicatePods(t *testing.T) {
		strategy api.DeschedulerStrategy
	}{
		{
			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 2 should be evicted.",
			description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
			maxPodsToEvictPerNode: 5,
			pods: []v1.Pod{*p1, *p2, *p3},
			expectedEvictedPodCount: 2,
			expectedEvictedPodCount: 1,
			strategy: api.DeschedulerStrategy{},
		},
		{
@@ -135,17 +147,17 @@ func TestFindDuplicatePods(t *testing.T) {
			strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
		},
		{
			description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 2 should be evicted.",
			description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
			maxPodsToEvictPerNode: 5,
			pods: []v1.Pod{*p8, *p9, *p10},
			expectedEvictedPodCount: 2,
			expectedEvictedPodCount: 1,
			strategy: api.DeschedulerStrategy{},
		},
		{
			description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
			maxPodsToEvictPerNode: 5,
			pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
			expectedEvictedPodCount: 4,
			expectedEvictedPodCount: 2,
			strategy: api.DeschedulerStrategy{},
		},
		{
@@ -159,7 +171,7 @@ func TestFindDuplicatePods(t *testing.T) {
			description: "Test all Pods: 4 should be evicted.",
			maxPodsToEvictPerNode: 5,
			pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
			expectedEvictedPodCount: 4,
			expectedEvictedPodCount: 2,
			strategy: api.DeschedulerStrategy{},
		},
		{
@@ -186,27 +198,220 @@ func TestFindDuplicatePods(t *testing.T) {
	}

	for _, testCase := range testCases {
		fakeClient := &fake.Clientset{}
		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
			return true, &v1.PodList{Items: testCase.pods}, nil
		})
		fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, node, nil
		})
		podEvictor := evictions.NewPodEvictor(
			fakeClient,
			"v1",
			false,
			testCase.maxPodsToEvictPerNode,
			[]*v1.Node{node},
			false,
		)
		t.Run(testCase.description, func(t *testing.T) {
			fakeClient := &fake.Clientset{}
			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
				return true, &v1.PodList{Items: testCase.pods}, nil
			})
			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				"v1",
				false,
				testCase.maxPodsToEvictPerNode,
				[]*v1.Node{node1, node2},
				false,
			)

		RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, []*v1.Node{node}, podEvictor)
		podsEvicted := podEvictor.TotalEvicted()
		if podsEvicted != testCase.expectedEvictedPodCount {
			t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
		}
			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, []*v1.Node{node1, node2}, podEvictor)
			podsEvicted := podEvictor.TotalEvicted()
			if podsEvicted != testCase.expectedEvictedPodCount {
				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
			}
		})
	}

}

func TestRemoveDuplicatesUniformly(t *testing.T) {
	ctx := context.Background()

	setRSOwnerRef2 := func(pod *v1.Pod) {
		pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
			{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"},
		}
	}
	setTwoRSOwnerRef := func(pod *v1.Pod) {
		pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
			{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-1"},
			{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"},
		}
	}

	testCases := []struct {
		description string
		maxPodsToEvictPerNode int
		pods []v1.Pod
		nodes []*v1.Node
		expectedEvictedPodCount int
		strategy api.DeschedulerStrategy
	}{
		{
			description: "Evict pods uniformly",
			pods: []v1.Pod{
				// (5,3,1) -> (3,3,3) -> 2 evictions
				*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
				*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
				*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
				*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
			},
			expectedEvictedPodCount: 2,
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, nil),
				test.BuildTestNode("n2", 2000, 3000, 10, nil),
				test.BuildTestNode("n3", 2000, 3000, 10, nil),
			},
			strategy: api.DeschedulerStrategy{},
		},
		{
			description: "Evict pods uniformly with one node left out",
			pods: []v1.Pod{
				// (5,3,1) -> (4,4,1) -> 1 eviction
				*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
				*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
				*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
				*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
			},
			expectedEvictedPodCount: 1,
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, nil),
				test.BuildTestNode("n2", 2000, 3000, 10, nil),
			},
			strategy: api.DeschedulerStrategy{},
		},
		{
			description: "Evict pods uniformly with two replica sets",
			pods: []v1.Pod{
				// (5,3,1) -> (3,3,3) -> 2 evictions
				*test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
				*test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
				*test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
				*test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
				*test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
				*test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
				*test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
				*test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
				*test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
			},
			expectedEvictedPodCount: 4,
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, nil),
				test.BuildTestNode("n2", 2000, 3000, 10, nil),
				test.BuildTestNode("n3", 2000, 3000, 10, nil),
			},
			strategy: api.DeschedulerStrategy{},
		},
		{
			description: "Evict pods uniformly with two owner references",
			pods: []v1.Pod{
				// (5,3,1) -> (3,3,3) -> 2 evictions
				*test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
				*test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
				*test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
				*test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
				// (1,3,5) -> (3,3,3) -> 2 evictions
				*test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
				*test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
				*test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
				*test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
				*test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
				*test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
				*test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
				*test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
				*test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
			},
			expectedEvictedPodCount: 4,
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, nil),
				test.BuildTestNode("n2", 2000, 3000, 10, nil),
				test.BuildTestNode("n3", 2000, 3000, 10, nil),
			},
			strategy: api.DeschedulerStrategy{},
		},
		{
			description: "Evict pods with number of pods less than nodes",
			pods: []v1.Pod{
				// (2,0,0) -> (1,1,0) -> 1 eviction
				*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
				*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
			},
			expectedEvictedPodCount: 1,
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, nil),
				test.BuildTestNode("n2", 2000, 3000, 10, nil),
				test.BuildTestNode("n3", 2000, 3000, 10, nil),
			},
			strategy: api.DeschedulerStrategy{},
		},
		{
			description: "Evict pods with number of pods less than nodes, but ignore different pods with the same ownerref",
			pods: []v1.Pod{
				// (1, 0, 0) for "bar","baz" images -> no eviction, even with a matching ownerKey
				// (2, 0, 0) for "foo" image -> (1,1,0) - 1 eviction
				// In this case the only "real" duplicates are p1 and p4, so one of those should be evicted
				*buildTestPodWithImage("p1", "n1", "foo"),
				*buildTestPodWithImage("p2", "n1", "bar"),
				*buildTestPodWithImage("p3", "n1", "baz"),
				*buildTestPodWithImage("p4", "n1", "foo"),
			},
			expectedEvictedPodCount: 1,
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, nil),
				test.BuildTestNode("n2", 2000, 3000, 10, nil),
				test.BuildTestNode("n3", 2000, 3000, 10, nil),
			},
			strategy: api.DeschedulerStrategy{},
		},
		{
			description: "Evict pods with a single pod with three nodes",
			pods: []v1.Pod{
				// (2,0,0) -> (1,1,0) -> 1 eviction
				*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
			},
			expectedEvictedPodCount: 0,
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, nil),
				test.BuildTestNode("n2", 2000, 3000, 10, nil),
				test.BuildTestNode("n3", 2000, 3000, 10, nil),
			},
			strategy: api.DeschedulerStrategy{},
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			fakeClient := &fake.Clientset{}
			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
				return true, &v1.PodList{Items: testCase.pods}, nil
			})
			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				"v1",
				false,
				testCase.maxPodsToEvictPerNode,
				testCase.nodes,
				false,
			)

			RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)
			podsEvicted := podEvictor.TotalEvicted()
			if podsEvicted != testCase.expectedEvictedPodCount {
				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
			}
		})
	}
}

@@ -33,11 +33,14 @@ import (
	"sigs.k8s.io/descheduler/pkg/utils"
)

// NodeUsageMap stores a node's info, pods on it and its resource usage
type NodeUsageMap struct {
// NodeUsage stores a node's info, pods on it, thresholds and its resource usage
type NodeUsage struct {
	node *v1.Node
	usage api.ResourceThresholds
	usage map[v1.ResourceName]*resource.Quantity
	allPods []*v1.Pod

	lowResourceThreshold map[v1.ResourceName]*resource.Quantity
	highResourceThreshold map[v1.ResourceName]*resource.Quantity
}

// NodePodsMap is a set of (node, pods) pairs
@@ -66,19 +69,19 @@ func validateLowNodeUtilizationParams(params *api.StrategyParameters) error {
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
	if err := validateLowNodeUtilizationParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
		return
	}
	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
	if err != nil {
		klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
		return
	}

	thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
	targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
	if err := validateStrategyConfig(thresholds, targetThresholds); err != nil {
		klog.Errorf("LowNodeUtilization config is not valid: %v", err)
		klog.ErrorS(err, "LowNodeUtilization config is not valid")
		return
	}
	// check if Pods/CPU/Mem are set, if not, set them to 100
@@ -95,48 +98,59 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
		targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
	}

	npm := createNodePodsMap(ctx, client, nodes)
	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
	lowNodes, targetNodes := classifyNodes(
		getNodeUsage(ctx, client, nodes, thresholds, targetThresholds),
		// The node has to be schedulable (to be able to move workload there)
		func(node *v1.Node, usage NodeUsage) bool {
			if nodeutil.IsNodeUnschedulable(node) {
				klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
				return false
			}
			return isNodeWithLowUtilization(usage)
		},
		func(node *v1.Node, usage NodeUsage) bool {
			return isNodeAboveTargetUtilization(usage)
		},
	)

	klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
		thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
	klog.V(1).InfoS("Criteria for a node under utilization",
		"CPU", thresholds[v1.ResourceCPU], "Mem", thresholds[v1.ResourceMemory], "Pods", thresholds[v1.ResourcePods])

	if len(lowNodes) == 0 {
		klog.V(1).Infof("No node is underutilized, nothing to do here, you might tune your thresholds further")
		klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
		return
	}
	klog.V(1).Infof("Total number of underutilized nodes: %v", len(lowNodes))
	klog.V(1).InfoS("Total number of underutilized nodes", "totalNumber", len(lowNodes))

	if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
		klog.V(1).Infof("number of nodes underutilized (%v) is less than NumberOfNodes (%v), nothing to do here", len(lowNodes), strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
		klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
		return
	}

	if len(lowNodes) == len(nodes) {
		klog.V(1).Infof("all nodes are underutilized, nothing to do here")
		klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
		return
	}

	if len(targetNodes) == 0 {
		klog.V(1).Infof("all nodes are under target utilization, nothing to do here")
		klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
		return
	}

	klog.V(1).Infof("Criteria for a node above target utilization: CPU: %v, Mem: %v, Pods: %v",
		targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
	klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
	klog.V(1).InfoS("Criteria for a node above target utilization",
		"CPU", targetThresholds[v1.ResourceCPU], "Mem", targetThresholds[v1.ResourceMemory], "Pods", targetThresholds[v1.ResourcePods])

	klog.V(1).InfoS("Number of nodes above target utilization", "totalNumber", len(targetNodes))
	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

	evictPodsFromTargetNodes(
		ctx,
		targetNodes,
		lowNodes,
		targetThresholds,
		podEvictor,
		evictable.IsEvictable)

	klog.V(1).Infof("Total number of pods evicted: %v", podEvictor.TotalEvicted())
	klog.V(1).InfoS("Total number of pods evicted", "evictedPods", podEvictor.TotalEvicted())
}

// validateStrategyConfig checks if the strategy's config is valid
@@ -181,29 +195,87 @@ func validateThresholds(thresholds api.ResourceThresholds) error {
	return nil
}

// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
// low and high thresholds, it is simply ignored.
func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
	lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
	for node, pods := range npm {
		usage := nodeUtilization(node, pods)
		nuMap := NodeUsageMap{
			node: node,
			usage: usage,
			allPods: pods,
func getNodeUsage(
	ctx context.Context,
	client clientset.Interface,
	nodes []*v1.Node,
	lowThreshold, highThreshold api.ResourceThresholds,
) []NodeUsage {
	nodeUsageList := []NodeUsage{}

	for _, node := range nodes {
		pods, err := podutil.ListPodsOnANode(ctx, client, node)
		if err != nil {
			klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
			continue
		}
		// Check if node is underutilized and if we can schedule pods on it.
		if !nodeutil.IsNodeUnschedulable(node) && isNodeWithLowUtilization(usage, thresholds) {
			klog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
			lowNodes = append(lowNodes, nuMap)
		} else if isNodeAboveTargetUtilization(usage, targetThresholds) {
			klog.V(2).Infof("Node %#v is over utilized with usage: %#v", node.Name, usage)
			targetNodes = append(targetNodes, nuMap)
		} else {
			klog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)

		nodeCapacity := node.Status.Capacity
		if len(node.Status.Allocatable) > 0 {
			nodeCapacity = node.Status.Allocatable
		}

		nodeUsageList = append(nodeUsageList, NodeUsage{
			node: node,
			usage: nodeUtilization(node, pods),
			allPods: pods,
			// A threshold is in percentages but in <0;100> interval.
			// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
			// Multiplying it with capacity will give fraction of the capacity corresponding to the given high/low resource threshold in Quantity units.
			lowResourceThreshold: map[v1.ResourceName]*resource.Quantity{
				v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(lowThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
				v1.ResourceMemory: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
				v1.ResourcePods: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
			},
			highResourceThreshold: map[v1.ResourceName]*resource.Quantity{
				v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(highThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
				v1.ResourceMemory: resource.NewQuantity(int64(float64(highThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
				v1.ResourcePods: resource.NewQuantity(int64(float64(highThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
			},
		})
	}

	return nodeUsageList
}
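A worked instance of the percentage-to-Quantity conversion above, assuming a node with 2000m CPU allocatable and a low CPU threshold of 40 (that is, 40%):

	// 40% of 2000m CPU: 40 * 2000 * 0.01 = 800 -> expressed as the Quantity "800m"
	cpuLow := resource.NewMilliQuantity(int64(float64(40)*float64(2000)*0.01), resource.DecimalSI)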
func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
	nodeCapacity := nodeUsage.node.Status.Capacity
	if len(nodeUsage.node.Status.Allocatable) > 0 {
		nodeCapacity = nodeUsage.node.Status.Allocatable
	}

	resourceUsagePercentage := map[v1.ResourceName]float64{}
	for resourceName, resourceUsage := range nodeUsage.usage {
		cap := nodeCapacity[resourceName]
		if !cap.IsZero() {
			resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.Value()) / float64(cap.Value())
		}
	}
	return lowNodes, targetNodes

	return resourceUsagePercentage
}

// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
// low and high thresholds, it is simply ignored.
func classifyNodes(
	nodeUsages []NodeUsage,
	lowThresholdFilter, highThresholdFilter func(node *v1.Node, usage NodeUsage) bool,
) ([]NodeUsage, []NodeUsage) {
	lowNodes, highNodes := []NodeUsage{}, []NodeUsage{}

	for _, nodeUsage := range nodeUsages {
		if lowThresholdFilter(nodeUsage.node, nodeUsage) {
			klog.V(2).InfoS("Node is underutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
			lowNodes = append(lowNodes, nodeUsage)
		} else if highThresholdFilter(nodeUsage.node, nodeUsage) {
			klog.V(2).InfoS("Node is overutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
			highNodes = append(highNodes, nodeUsage)
		} else {
			klog.V(2).InfoS("Node is appropriately utilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
		}
	}

	return lowNodes, highNodes
}

// evictPodsFromTargetNodes evicts pods based on priority, if all the pods on the node have priority, if not
@@ -211,8 +283,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
// TODO: @ravig Break this function into smaller functions.
func evictPodsFromTargetNodes(
	ctx context.Context,
	targetNodes, lowNodes []NodeUsageMap,
	targetThresholds api.ResourceThresholds,
	targetNodes, lowNodes []NodeUsage,
	podEvictor *evictions.PodEvictor,
	podFilter func(pod *v1.Pod) bool,
) {
@@ -220,101 +291,104 @@ func evictPodsFromTargetNodes(
	sortNodesByUsage(targetNodes)

	// upper bound on total number of pods/cpu/memory to be moved
	var totalPods, totalCPU, totalMem float64
	totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{
		v1.ResourcePods: {},
		v1.ResourceCPU: {},
		v1.ResourceMemory: {},
	}

	var taintsOfLowNodes = make(map[string][]v1.Taint, len(lowNodes))
	for _, node := range lowNodes {
		taintsOfLowNodes[node.node.Name] = node.node.Spec.Taints
		nodeCapacity := node.node.Status.Capacity
		if len(node.node.Status.Allocatable) > 0 {
			nodeCapacity = node.node.Status.Allocatable

		for name := range totalAvailableUsage {
			totalAvailableUsage[name].Add(*node.highResourceThreshold[name])
			totalAvailableUsage[name].Sub(*node.usage[name])
		}
		// totalPods to be moved
		podsPercentage := targetThresholds[v1.ResourcePods] - node.usage[v1.ResourcePods]
		totalPods += ((float64(podsPercentage) * float64(nodeCapacity.Pods().Value())) / 100)

		// totalCPU capacity to be moved
		cpuPercentage := targetThresholds[v1.ResourceCPU] - node.usage[v1.ResourceCPU]
		totalCPU += ((float64(cpuPercentage) * float64(nodeCapacity.Cpu().MilliValue())) / 100)

		// totalMem capacity to be moved
		memPercentage := targetThresholds[v1.ResourceMemory] - node.usage[v1.ResourceMemory]
		totalMem += ((float64(memPercentage) * float64(nodeCapacity.Memory().Value())) / 100)
	}

	klog.V(1).Infof("Total capacity to be moved: CPU:%v, Mem:%v, Pods:%v", totalCPU, totalMem, totalPods)
	klog.V(1).Infof("********Number of pods evicted from each node:***********")
	klog.V(1).InfoS(
		"Total capacity to be moved",
		"CPU", totalAvailableUsage[v1.ResourceCPU].MilliValue(),
		"Mem", totalAvailableUsage[v1.ResourceMemory].Value(),
		"Pods", totalAvailableUsage[v1.ResourcePods].Value(),
	)

	for _, node := range targetNodes {
		nodeCapacity := node.node.Status.Capacity
		if len(node.node.Status.Allocatable) > 0 {
			nodeCapacity = node.node.Status.Allocatable
		}
		klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
		klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)

		nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
		klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, removablePods:%v", len(node.allPods), len(nonRemovablePods), len(removablePods))
		klog.V(2).InfoS("Pods on node", "node", klog.KObj(node.node), "allPods", len(node.allPods), "nonRemovablePods", len(nonRemovablePods), "removablePods", len(removablePods))

		if len(removablePods) == 0 {
			klog.V(1).Infof("no removable pods on node %#v, try next node", node.node.Name)
			klog.V(1).InfoS("No removable pods on node, try next node", "node", klog.KObj(node.node))
			continue
		}

		klog.V(1).Infof("evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
		klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
		// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
		podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
		evictPods(ctx, removablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)

		klog.V(1).Infof("%v pods evicted from node %#v with usage %v", podEvictor.NodeEvicted(node.node), node.node.Name, node.usage)
		evictPods(ctx, removablePods, node, totalAvailableUsage, taintsOfLowNodes, podEvictor)
		klog.V(1).InfoS("Evicted pods from node", "node", klog.KObj(node.node), "evictedPods", podEvictor.NodeEvicted(node.node), "usage", node.usage)
	}
}

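The totalAvailableUsage accumulation works out like this, as a sketch assuming a single underutilized node whose high CPU threshold converts to 1000m while 400m is currently requested:

	// per low node and per resource:
	// totalAvailableUsage[cpu].Add(1000m) // the node's high threshold
	// totalAvailableUsage[cpu].Sub(400m)  // its current usage
	// -> 600m of headroom available to absorb evicted pods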
func evictPods(
	ctx context.Context,
	inputPods []*v1.Pod,
	targetThresholds api.ResourceThresholds,
	nodeCapacity v1.ResourceList,
	nodeUsage api.ResourceThresholds,
	totalPods *float64,
	totalCPU *float64,
	totalMem *float64,
	nodeUsage NodeUsage,
	totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
	taintsOfLowNodes map[string][]v1.Taint,
	podEvictor *evictions.PodEvictor,
	node *v1.Node) {
) {
	// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
	if isNodeAboveTargetUtilization(nodeUsage, targetThresholds) && *totalPods > 0 && *totalCPU > 0 && *totalMem > 0 {
		onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
	continueCond := func() bool {
		if !isNodeAboveTargetUtilization(nodeUsage) {
			return false
		}
		if totalAvailableUsage[v1.ResourcePods].CmpInt64(0) < 1 {
			return false
		}
		if totalAvailableUsage[v1.ResourceCPU].CmpInt64(0) < 1 {
			return false
		}
		if totalAvailableUsage[v1.ResourceMemory].CmpInt64(0) < 1 {
			return false
		}
		return true
	}

	if continueCond() {
		for _, pod := range inputPods {
			if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
				klog.V(3).Infof("Skipping eviction for Pod: %#v, doesn't tolerate node taint", pod.Name)
				klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod))

				continue
			}

			cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
			mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)

			success, err := podEvictor.EvictPod(ctx, pod, node, "LowNodeUtilization")
			success, err := podEvictor.EvictPod(ctx, pod, nodeUsage.node, "LowNodeUtilization")
			if err != nil {
				klog.Errorf("Error evicting pod: (%#v)", err)
				klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
				break
			}

			if success {
				klog.V(3).Infof("Evicted pod: %#v", pod.Name)
				// update remaining pods
				nodeUsage[v1.ResourcePods] -= onePodPercentage
				*totalPods--
				klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod), "err", err)

				// update remaining cpu
				*totalCPU -= float64(cUsage)
				nodeUsage[v1.ResourceCPU] -= api.Percentage((float64(cUsage) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
				cpuQuantity := utils.GetResourceRequestQuantity(pod, v1.ResourceCPU)
				nodeUsage.usage[v1.ResourceCPU].Sub(cpuQuantity)
				totalAvailableUsage[v1.ResourceCPU].Sub(cpuQuantity)

				// update remaining memory
				*totalMem -= float64(mUsage)
				nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
				memoryQuantity := utils.GetResourceRequestQuantity(pod, v1.ResourceMemory)
				nodeUsage.usage[v1.ResourceMemory].Sub(memoryQuantity)
				totalAvailableUsage[v1.ResourceMemory].Sub(memoryQuantity)

				klog.V(3).Infof("updated node usage: %#v", nodeUsage)
				nodeUsage.usage[v1.ResourcePods].Sub(*resource.NewQuantity(1, resource.DecimalSI))
				totalAvailableUsage[v1.ResourcePods].Sub(*resource.NewQuantity(1, resource.DecimalSI))

				klog.V(3).InfoS("Updated node usage", "updatedUsage", nodeUsage)
				// check if node utilization drops below target threshold or any required capacity (cpu, memory, pods) is moved
				if !isNodeAboveTargetUtilization(nodeUsage, targetThresholds) || *totalPods <= 0 || *totalCPU <= 0 || *totalMem <= 0 {
				if !continueCond() {
					break
				}
			}
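On the continueCond checks above: resource.Quantity's CmpInt64 returns -1, 0, or 1, so `CmpInt64(0) < 1` is true exactly when the remaining budget is zero or negative. A small sketch:

	q := resource.NewMilliQuantity(600, resource.DecimalSI)
	q.CmpInt64(0) // 1: headroom left, keep evicting
	q.Sub(*resource.NewMilliQuantity(600, resource.DecimalSI))
	q.CmpInt64(0) // 0: budget exhausted, continueCond() returns false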
@@ -323,70 +397,45 @@ func evictPods(
	}

// sortNodesByUsage sorts nodes based on usage in descending order
func sortNodesByUsage(nodes []NodeUsageMap) {
func sortNodesByUsage(nodes []NodeUsage) {
	sort.Slice(nodes, func(i, j int) bool {
		var ti, tj api.Percentage
		for name, value := range nodes[i].usage {
			if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
				ti += value
			}
		}
		for name, value := range nodes[j].usage {
			if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
				tj += value
			}
		}
		ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
		tj := nodes[j].usage[v1.ResourceMemory].Value() + nodes[j].usage[v1.ResourceCPU].MilliValue() + nodes[j].usage[v1.ResourcePods].Value()
		// To return sorted in descending order
		return ti > tj
	})
}

// createNodePodsMap returns nodepodsmap with evictable pods on node.
func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
	npm := NodePodsMap{}
	for _, node := range nodes {
		pods, err := podutil.ListPodsOnANode(ctx, client, node)
		if err != nil {
			klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
		} else {
			npm[node] = pods
		}
	}
	return npm
}

// isNodeAboveTargetUtilization checks if a node is overutilized
func isNodeAboveTargetUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
	for name, nodeValue := range nodeThresholds {
		if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
			if value, ok := thresholds[name]; !ok {
				continue
			} else if nodeValue > value {
				return true
			}
// At least one resource has to be above the high threshold
func isNodeAboveTargetUtilization(usage NodeUsage) bool {
	for name, nodeValue := range usage.usage {
		// usage.highResourceThreshold[name] < nodeValue
		if usage.highResourceThreshold[name].Cmp(*nodeValue) == -1 {
			return true
		}
	}
	return false
}

// isNodeWithLowUtilization checks if a node is underutilized
func isNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
	for name, nodeValue := range nodeThresholds {
		if name == v1.ResourceCPU || name == v1.ResourceMemory || name == v1.ResourcePods {
			if value, ok := thresholds[name]; !ok {
				continue
			} else if nodeValue > value {
				return false
			}
// All resources have to be below the low threshold
func isNodeWithLowUtilization(usage NodeUsage) bool {
	for name, nodeValue := range usage.usage {
		// usage.lowResourceThreshold[name] < nodeValue
		if usage.lowResourceThreshold[name].Cmp(*nodeValue) == -1 {
			return false
		}
	}

	return true
}

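Note the asymmetry these two predicates encode: a node is overutilized if at least one resource exceeds its high threshold, but underutilized only if every resource sits below its low threshold. Quantity.Cmp returns -1 when the receiver is the smaller value, for example:

	high := resource.MustParse("800m")
	used := resource.MustParse("900m")
	high.Cmp(used) // -1: usage exceeds the high threshold, so this resource marks the node overutilized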
func nodeUtilization(node *v1.Node, pods []*v1.Pod) api.ResourceThresholds {
func nodeUtilization(node *v1.Node, pods []*v1.Pod) map[v1.ResourceName]*resource.Quantity {
	totalReqs := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU: {},
		v1.ResourceMemory: {},
		v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
		v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
		v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
	}
	for _, pod := range pods {
		req, _ := utils.PodRequestsAndLimits(pod)
@@ -399,17 +448,7 @@ func nodeUtilization(node *v1.Node, pods []*v1.Pod) api.ResourceThresholds {
		}
	}

	nodeCapacity := node.Status.Capacity
	if len(node.Status.Allocatable) > 0 {
		nodeCapacity = node.Status.Allocatable
	}

	totalPods := len(pods)
	return api.ResourceThresholds{
		v1.ResourceCPU: api.Percentage((float64(totalReqs[v1.ResourceCPU].MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue())),
		v1.ResourceMemory: api.Percentage(float64(totalReqs[v1.ResourceMemory].Value()) / float64(nodeCapacity.Memory().Value()) * 100),
		v1.ResourcePods: api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value())),
	}
	return totalReqs
}

func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*v1.Pod) {

@@ -49,12 +49,12 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
|
||||
// RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
|
||||
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
|
||||
if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
|
||||
klog.V(1).Info(err)
|
||||
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
|
||||
return
|
||||
}
|
||||
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
|
||||
if err != nil {
|
||||
klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
|
||||
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -67,12 +67,12 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
|
||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
|
||||
|
||||
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
|
||||
klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
|
||||
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
|
||||
|
||||
switch nodeAffinity {
|
||||
case "requiredDuringSchedulingIgnoredDuringExecution":
|
||||
for _, node := range nodes {
|
||||
klog.V(1).Infof("Processing node: %#v\n", node.Name)
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
|
||||
pods, err := podutil.ListPodsOnANode(
|
||||
ctx,
|
||||
@@ -87,22 +87,22 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
|
||||
podutil.WithoutNamespaces(excludedNamespaces),
|
||||
)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get pods from %v: %v", node.Name, err)
|
||||
klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
klog.V(1).Infof("Evicting pod: %v", pod.Name)
|
||||
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
|
||||
if _, err := podEvictor.EvictPod(ctx, pod, node, "NodeAffinity"); err != nil {
|
||||
klog.Errorf("Error evicting pod: (%#v)", err)
|
||||
klog.ErrorS(err, "Error evicting pod")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
klog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
|
||||
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
|
||||
}
|
||||
}
|
||||
klog.V(1).Infof("Evicted %v pods", podEvictor.TotalEvicted())
|
||||
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
|
||||
}

@@ -49,7 +49,7 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
// RemovePodsViolatingNodeTaints evicts pods that do not tolerate NoSchedule taints on their nodes
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
		return
	}

@@ -61,14 +61,14 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa

	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
	if err != nil {
		klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
		return
	}

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v\n", node.Name)
		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
		pods, err := podutil.ListPodsOnANode(
			ctx,
			client,
@@ -88,9 +88,9 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
			node.Spec.Taints,
			func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
		) {
			klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name)
			klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
			if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
				klog.Errorf("Error evicting pod: (%#v)", err)
				klog.ErrorS(err, "Error evicting pod")
				break
			}
		}

@@ -50,7 +50,7 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node that violate inter-pod anti-affinity rules.
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
		return
	}

@@ -62,19 +62,18 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients

	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
	if err != nil {
		klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
		return
	}

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v\n", node.Name)
		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
		pods, err := podutil.ListPodsOnANode(
			ctx,
			client,
			node,
			podutil.WithFilter(evictable.IsEvictable),
			podutil.WithNamespaces(includedNamespaces),
			podutil.WithoutNamespaces(excludedNamespaces),
		)
@@ -85,10 +84,10 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
		podutil.SortPodsBasedOnPriorityLowToHigh(pods)
		totalPods := len(pods)
		for i := 0; i < totalPods; i++ {
			if checkPodsWithAntiAffinityExist(pods[i], pods) {
			if checkPodsWithAntiAffinityExist(pods[i], pods) && evictable.IsEvictable(pods[i]) {
				success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
				if err != nil {
					klog.Errorf("Error evicting pod: (%#v)", err)
					klog.ErrorS(err, "Error evicting pod")
					break
				}

@@ -113,7 +112,7 @@ func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
			namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
			selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
			if err != nil {
				klog.Infof("%v", err)
				klog.ErrorS(err, "Unable to convert LabelSelector into Selector")
				return false
			}
			for _, existingPod := range pods {
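
A small, self-contained sketch (not from this commit; the selector and labels are invented) of the metav1.LabelSelectorAsSelector conversion and match used by checkPodsWithAntiAffinityExist above:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A LabelSelector as it would appear in a pod's anti-affinity term.
	ls := &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}

	// Convert it into a labels.Selector, as the strategy does before matching.
	selector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		fmt.Println("unable to convert LabelSelector into Selector:", err)
		return
	}

	// Matches reports whether an existing pod's labels satisfy the selector.
	fmt.Println(selector.Matches(labels.Set{"foo": "bar"})) // true
	fmt.Println(selector.Matches(labels.Set{"foo": "baz"})) // false
}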

@@ -27,6 +27,7 @@ import (
	core "k8s.io/client-go/testing"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/test"
)

@@ -40,10 +41,15 @@ func TestPodAntiAffinity(t *testing.T) {
	p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
	p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
	p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
	criticalPriority := utils.SystemCriticalPriority
	nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node.Name, func(pod *v1.Pod) {
		pod.Spec.Priority = &criticalPriority
	})
	p2.Labels = map[string]string{"foo": "bar"}
	p5.Labels = map[string]string{"foo": "bar"}
	p6.Labels = map[string]string{"foo": "bar"}
	p7.Labels = map[string]string{"foo1": "bar1"}
	nonEvictablePod.Labels = map[string]string{"foo": "bar"}
	test.SetNormalOwnerRef(p1)
	test.SetNormalOwnerRef(p2)
	test.SetNormalOwnerRef(p3)
@@ -89,6 +95,12 @@ func TestPodAntiAffinity(t *testing.T) {
			pods:                    []v1.Pod{*p5, *p6, *p7},
			expectedEvictedPodCount: 1,
		},
		{
			description:             "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
			maxPodsToEvictPerNode:   1,
			pods:                    []v1.Pod{*p1, *nonEvictablePod},
			expectedEvictedPodCount: 1,
		},
	}

	for _, test := range tests {

@@ -32,10 +32,18 @@ import (
)

func validatePodLifeTimeParams(params *api.StrategyParameters) error {
	if params == nil || params.MaxPodLifeTimeSeconds == nil {
	if params == nil || params.PodLifeTime == nil || params.PodLifeTime.MaxPodLifeTimeSeconds == nil {
		return fmt.Errorf("MaxPodLifeTimeSeconds not set")
	}

	if params.PodLifeTime.PodStatusPhases != nil {
		for _, phase := range params.PodLifeTime.PodStatusPhases {
			if phase != string(v1.PodPending) && phase != string(v1.PodRunning) {
				return fmt.Errorf("only Pending and Running phases are supported in PodLifeTime")
			}
		}
	}

	// At most one of include/exclude can be set
	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return fmt.Errorf("only one of Include/Exclude namespaces can be set")
@@ -50,13 +58,13 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
// PodLifeTime evicts pods that were created more than strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds seconds ago.
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if err := validatePodLifeTimeParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		klog.ErrorS(err, "Invalid PodLifeTime parameters")
		return
	}

	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
	if err != nil {
		klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
		return
	}

@@ -68,18 +76,30 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D

	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v", node.Name)
	filter := evictable.IsEvictable
	if strategy.Params.PodLifeTime.PodStatusPhases != nil {
		filter = func(pod *v1.Pod) bool {
			for _, phase := range strategy.Params.PodLifeTime.PodStatusPhases {
				if string(pod.Status.Phase) == phase {
					return evictable.IsEvictable(pod)
				}
			}
			return false
		}
	}

		pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, *strategy.Params.MaxPodLifeTimeSeconds, evictable.IsEvictable)
	for _, node := range nodes {
		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))

		pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
		for _, pod := range pods {
			success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
			if success {
				klog.V(1).Infof("Evicted pod: %#v because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds)
				klog.V(1).InfoS("Evicted pod because it exceeded its lifetime", "pod", klog.KObj(pod), "maxPodLifeTime", *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
			}

			if err != nil {
				klog.Errorf("Error evicting pod: (%#v)", err)
				klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
				break
			}
		}
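
Illustrative only (not part of this diff): the nested PodLifeTime parameters that the reworked validatePodLifeTimeParams accepts could be constructed as below; the value 3600 and the chosen phase are arbitrary:

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	maxLifeTime := uint(3600) // evict matching pods older than one hour
	params := &api.StrategyParameters{
		PodLifeTime: &api.PodLifeTime{
			MaxPodLifeTimeSeconds: &maxLifeTime,
			// only Pending and Running are accepted by the validation above
			PodStatusPhases: []string{"Pending"},
		},
	}
	fmt.Println(*params.PodLifeTime.MaxPodLifeTimeSeconds)
}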

@@ -85,6 +85,19 @@ func TestPodLifeTime(t *testing.T) {
	p5.ObjectMeta.OwnerReferences = ownerRef4
	p6.ObjectMeta.OwnerReferences = ownerRef4

	// Setup two old pods with different status phases
	p9 := test.BuildTestPod("p9", 100, 0, node.Name, nil)
	p9.Namespace = "dev"
	p9.ObjectMeta.CreationTimestamp = olderPodCreationTime
	p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
	p10.Namespace = "dev"
	p10.ObjectMeta.CreationTimestamp = olderPodCreationTime

	p9.Status.Phase = "Pending"
	p10.Status.Phase = "Running"
	p9.ObjectMeta.OwnerReferences = ownerRef1
	p10.ObjectMeta.OwnerReferences = ownerRef1

	var maxLifeTime uint = 600
	testCases := []struct {
		description string
@@ -98,7 +111,7 @@ func TestPodLifeTime(t *testing.T) {
			strategy: api.DeschedulerStrategy{
				Enabled: true,
				Params: &api.StrategyParameters{
					MaxPodLifeTimeSeconds: &maxLifeTime,
					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
				},
			},
			maxPodsToEvictPerNode: 5,
@@ -110,7 +123,7 @@ func TestPodLifeTime(t *testing.T) {
			strategy: api.DeschedulerStrategy{
				Enabled: true,
				Params: &api.StrategyParameters{
					MaxPodLifeTimeSeconds: &maxLifeTime,
					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
				},
			},
			maxPodsToEvictPerNode: 5,
@@ -122,7 +135,7 @@ func TestPodLifeTime(t *testing.T) {
			strategy: api.DeschedulerStrategy{
				Enabled: true,
				Params: &api.StrategyParameters{
					MaxPodLifeTimeSeconds: &maxLifeTime,
					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
				},
			},
			maxPodsToEvictPerNode: 5,
@@ -134,13 +147,28 @@ func TestPodLifeTime(t *testing.T) {
			strategy: api.DeschedulerStrategy{
				Enabled: true,
				Params: &api.StrategyParameters{
					MaxPodLifeTimeSeconds: &maxLifeTime,
					PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
				},
			},
			maxPodsToEvictPerNode:   5,
			pods:                    []v1.Pod{*p7, *p8},
			expectedEvictedPodCount: 0,
		},
		{
			description: "Two old pods with different status phases. 1 should be evicted.",
			strategy: api.DeschedulerStrategy{
				Enabled: true,
				Params: &api.StrategyParameters{
					PodLifeTime: &api.PodLifeTime{
						MaxPodLifeTimeSeconds: &maxLifeTime,
						PodStatusPhases:       []string{"Pending"},
					},
				},
			},
			maxPodsToEvictPerNode:   5,
			pods:                    []v1.Pod{*p9, *p10},
			expectedEvictedPodCount: 1,
		},
	}

	for _, tc := range testCases {

@@ -51,13 +51,13 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameter
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
		klog.V(1).Info(err)
		klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
		return
	}

	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
	if err != nil {
		klog.V(1).Infof("failed to get threshold priority from strategy's params: %#v", err)
		klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
		return
	}

@@ -70,7 +70,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %s", node.Name)
		klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
		pods, err := podutil.ListPodsOnANode(
			ctx,
			client,
@@ -80,7 +80,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
			podutil.WithoutNamespaces(excludedNamespaces),
		)
		if err != nil {
			klog.Errorf("Error when list pods at node %s", node.Name)
			klog.ErrorS(err, "Error listing the node's pods", "node", klog.KObj(node))
			continue
		}

@@ -94,7 +94,7 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
				continue
			}
			if _, err := podEvictor.EvictPod(ctx, pods[i], node, "TooManyRestarts"); err != nil {
				klog.Errorf("Error evicting pod: (%#v)", err)
				klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pods[i]))
				break
			}
		}

381 pkg/descheduler/strategies/topologyspreadconstraint.go Normal file
@@ -0,0 +1,381 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
	"context"
	"fmt"
	"math"
	"sort"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	"sigs.k8s.io/descheduler/pkg/utils"
)

// AntiAffinityTerm's topology key value used in predicate metadata
type topologyPair struct {
	key   string
	value string
}

type topology struct {
	pair topologyPair
	pods []*v1.Pod
}

func validateAndParseTopologySpreadParams(ctx context.Context, client clientset.Interface, params *api.StrategyParameters) (int32, sets.String, sets.String, error) {
	var includedNamespaces, excludedNamespaces sets.String
	if params == nil {
		return 0, includedNamespaces, excludedNamespaces, nil
	}
	// At most one of include/exclude can be set
	if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
		return 0, includedNamespaces, excludedNamespaces, fmt.Errorf("only one of Include/Exclude namespaces can be set")
	}
	if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
		return 0, includedNamespaces, excludedNamespaces, fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
	}
	thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
	if err != nil {
		return 0, includedNamespaces, excludedNamespaces, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
	}
	if params.Namespaces != nil {
		includedNamespaces = sets.NewString(params.Namespaces.Include...)
		excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
	}

	return thresholdPriority, includedNamespaces, excludedNamespaces, nil
}

func RemovePodsViolatingTopologySpreadConstraint(
	ctx context.Context,
	client clientset.Interface,
	strategy api.DeschedulerStrategy,
	nodes []*v1.Node,
	podEvictor *evictions.PodEvictor,
) {
	thresholdPriority, includedNamespaces, excludedNamespaces, err := validateAndParseTopologySpreadParams(ctx, client, strategy.Params)
	if err != nil {
		klog.ErrorS(err, "Invalid RemovePodsViolatingTopologySpreadConstraint parameters")
		return
	}

	nodeMap := make(map[string]*v1.Node, len(nodes))
	for _, node := range nodes {
		nodeMap[node.Name] = node
	}
	evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

	// 1. for each namespace for which there is a topology constraint
	// 2. for each TopologySpreadConstraint in that namespace,
	//    find all evictable pods in that namespace
	// 3. for each evictable pod in that namespace
	// 4. if the pod matches this TopologySpreadConstraint's LabelSelector
	// 5. if the pod's nodeName is present in the nodeMap
	// 6. create a topoPair keyed by this TopologySpreadConstraint.TopologyKey and this pod's node label value for that key
	// 7. add the pod under that topoPair
	// 8. find the min number of pods in any topoPair for this topologyKey;
	//    iterate through all topoPairs for this topologyKey and check currentPods - minPods <= maxSkew;
	//    if the diff > maxSkew, add pods from the current bucket for eviction

	// First record all of the constraints by namespace
	namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
	if err != nil {
		klog.ErrorS(err, "Couldn't list namespaces")
		return
	}
	klog.V(1).InfoS("Processing namespaces for topology spread constraints")
	podsForEviction := make(map[*v1.Pod]struct{})
	// 1. for each namespace...
	for _, namespace := range namespaces.Items {
		if (len(includedNamespaces) > 0 && !includedNamespaces.Has(namespace.Name)) ||
			(len(excludedNamespaces) > 0 && excludedNamespaces.Has(namespace.Name)) {
			continue
		}
		namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
		if err != nil {
			klog.ErrorS(err, "Couldn't list pods in namespace", "namespace", namespace)
			continue
		}

		// ...where there is a topology constraint
		//namespaceTopologySpreadConstrainPods := make([]v1.Pod, 0, len(namespacePods.Items))
		namespaceTopologySpreadConstraints := make(map[v1.TopologySpreadConstraint]struct{})
		for _, pod := range namespacePods.Items {
			for _, constraint := range pod.Spec.TopologySpreadConstraints {
				// Only deal with hard topology constraints
				// TODO(@damemi): add support for soft constraints
				if constraint.WhenUnsatisfiable != v1.DoNotSchedule {
					continue
				}
				namespaceTopologySpreadConstraints[constraint] = struct{}{}
			}
		}
		if len(namespaceTopologySpreadConstraints) == 0 {
			continue
		}

		// 2. for each topologySpreadConstraint in that namespace
		for constraint := range namespaceTopologySpreadConstraints {
			constraintTopologies := make(map[topologyPair][]*v1.Pod)
			// pre-populate the topologyPair map with all the topologies available from the nodeMap
			// (we can't just build it from existing pods' nodes because a topology may have 0 pods)
			for _, node := range nodeMap {
				if val, ok := node.Labels[constraint.TopologyKey]; ok {
					constraintTopologies[topologyPair{key: constraint.TopologyKey, value: val}] = make([]*v1.Pod, 0)
				}
			}

			selector, err := metav1.LabelSelectorAsSelector(constraint.LabelSelector)
			if err != nil {
				klog.ErrorS(err, "Couldn't parse label selector as selector", "selector", constraint.LabelSelector)
				continue
			}

			// 3. for each evictable pod in that namespace
			// (this loop is where we count the number of pods per topologyValue that match this constraint's selector)
			var sumPods float64
			for i := range namespacePods.Items {
				// 4. if the pod matches this TopologySpreadConstraint's LabelSelector
				if !selector.Matches(labels.Set(namespacePods.Items[i].Labels)) {
					continue
				}

				// 5. if the pod's node has this constraint's topologyKey, create a topoPair and add the pod
				node, ok := nodeMap[namespacePods.Items[i].Spec.NodeName]
				if !ok {
					// If ok is false, node is nil and node.Labels would panic; the pod is not scheduled yet, so it is safe to skip it here.
					continue
				}
				nodeValue, ok := node.Labels[constraint.TopologyKey]
				if !ok {
					continue
				}
				// 6. create a topoPair with key as this TopologySpreadConstraint
				topoPair := topologyPair{key: constraint.TopologyKey, value: nodeValue}
				// 7. add the pod with key as this topoPair
				constraintTopologies[topoPair] = append(constraintTopologies[topoPair], &namespacePods.Items[i])
				sumPods++
			}
			if topologyIsBalanced(constraintTopologies, constraint) {
				klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
				continue
			}
			balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodeMap)
		}
	}

	for pod := range podsForEviction {
		if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil && !evictable.IsEvictable(pod) {
			klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
			break
		}
	}
}

// topologyIsBalanced checks if any domains in the topology differ by more than the MaxSkew
// this is called before any sorting or other calculations and is used to skip topologies that don't need to be balanced
func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.TopologySpreadConstraint) bool {
	minDomainSize := math.MaxInt32
	maxDomainSize := math.MinInt32
	for _, pods := range topology {
		if len(pods) < minDomainSize {
			minDomainSize = len(pods)
		}
		if len(pods) > maxDomainSize {
			maxDomainSize = len(pods)
		}
		if int32(maxDomainSize-minDomainSize) > constraint.MaxSkew {
			return false
		}
	}
	return true
}
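
A brief hedged walkthrough (not part of the commit; the domain sizes are invented):

// For domains sized [2, 4] with maxSkew=1, once the loop above has visited
// both domains the running max-min difference is 2 > 1, so
// topologyIsBalanced returns false and balanceDomains below is invoked.
// For sizes [2, 3] the difference never exceeds 1 and the constraint is
// skipped early.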

// balanceDomains determines how many pods (at minimum) should be evicted from large domains to achieve an ideal balance within maxSkew
// To determine how many pods need to be moved, we sort the topology domains in ascending order of size, so
// [2, 5, 3, 8, 5, 7]
//
// would end up as:
// [2, 3, 5, 5, 7, 8]
//
// We then start at i=[0] and j=[len(list)-1] and compare the 2 topology sizes.
// If the diff of the size of the domains is more than the maxSkew, we will move up to half that skew,
// or the available pods from the higher domain, or the number required to bring the smaller domain up to the average,
// whichever number is less.
//
// (Note, we will only move as many pods from a domain as possible without bringing it below the ideal average,
// and we will not bring any smaller domain above the average)
// If the diff is within the skew, we move to the next highest domain.
// If the higher domain can't give any more without falling below the average, we move to the next lowest "high" domain
//
// Following this, the above topology domains end up "sorted" as:
// [5, 5, 5, 5, 5, 5]
// (assuming even distribution by the scheduler of the evicted pods)
func balanceDomains(
	podsForEviction map[*v1.Pod]struct{},
	constraint v1.TopologySpreadConstraint,
	constraintTopologies map[topologyPair][]*v1.Pod,
	sumPods float64,
	isEvictable func(*v1.Pod) bool,
	nodeMap map[string]*v1.Node) {
	idealAvg := sumPods / float64(len(constraintTopologies))
	sortedDomains := sortDomains(constraintTopologies, isEvictable)
	// i is the index for belowOrEqualAvg
	// j is the index for aboveAvg
	i := 0
	j := len(sortedDomains) - 1
	for i < j {
		// if j has no more to give without falling below the ideal average, move to the next aboveAvg
		if float64(len(sortedDomains[j].pods)) < idealAvg {
			j--
		}

		// skew = the actual difference between the domains
		skew := float64(len(sortedDomains[j].pods) - len(sortedDomains[i].pods))

		// if i and j are within the maxSkew of each other, move to the next belowOrEqualAvg
		if int32(skew) <= constraint.MaxSkew {
			i++
			continue
		}

		// the most that can be given from aboveAvg is:
		// 1. up to half the distance between them, minus MaxSkew, rounded up
		// 2. how many it has remaining without falling below the average, rounded up, or
		// 3. how many can be added without bringing the smaller domain above the average, rounded up,
		// whichever is less
		// (This is the basic principle of keeping all sizes within ~skew of the average)
		aboveAvg := math.Ceil(float64(len(sortedDomains[j].pods)) - idealAvg)
		belowAvg := math.Ceil(idealAvg - float64(len(sortedDomains[i].pods)))
		smallestDiff := math.Min(aboveAvg, belowAvg)
		halfSkew := math.Ceil((skew - float64(constraint.MaxSkew)) / 2)
		movePods := int(math.Min(smallestDiff, halfSkew))
		if movePods <= 0 {
			i++
			continue
		}

		// remove pods from the higher topology and add them to the list of pods to be evicted
		// also (just for tracking), add them to the list of pods in the lower topology
		aboveToEvict := sortedDomains[j].pods[len(sortedDomains[j].pods)-movePods:]
		for k := range aboveToEvict {
			// if the pod has a hard nodeAffinity or nodeSelector that only matches this node,
			// don't bother evicting it as it will just end up back on the same node
			// however we still account for it "being evicted" so the algorithm can complete
			// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
			// but still try to get the required # of movePods (instead of just chopping that value off the slice above)
			if aboveToEvict[k].Spec.NodeSelector != nil ||
				(aboveToEvict[k].Spec.Affinity != nil &&
					aboveToEvict[k].Spec.Affinity.NodeAffinity != nil &&
					aboveToEvict[k].Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
					nodesPodFitsOnBesidesCurrent(aboveToEvict[k], nodeMap) == 0) {
				klog.V(2).InfoS("Ignoring pod for eviction due to node selector/affinity", "pod", klog.KObj(aboveToEvict[k]))
				continue
			}
			podsForEviction[aboveToEvict[k]] = struct{}{}
		}
		sortedDomains[j].pods = sortedDomains[j].pods[:len(sortedDomains[j].pods)-movePods]
		sortedDomains[i].pods = append(sortedDomains[i].pods, aboveToEvict...)
	}
}
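
A worked example of the arithmetic above (illustrative; the sizes are invented, the formulas are the ones in the function):

// Domains sized [1, 5] with maxSkew=1: idealAvg = 3 and skew = 4.
// aboveAvg = ceil(5 - 3) = 2, belowAvg = ceil(3 - 1) = 2,
// halfSkew = ceil((4 - 1) / 2) = 2, so movePods = min(2, 2) = 2,
// and the domains end up [3, 3], within the allowed skew.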

// nodesPodFitsOnBesidesCurrent counts the number of nodes this pod could fit on based on its affinity
// It excludes the current node because, for the sake of domain balancing only, we care about whether there is any other
// place it could theoretically fit.
// If the pod doesn't fit on its current node, that is a job for RemovePodsViolatingNodeAffinity, and irrelevant to Topology Spreading
func nodesPodFitsOnBesidesCurrent(pod *v1.Pod, nodeMap map[string]*v1.Node) int {
	count := 0
	for _, node := range nodeMap {
		if nodeutil.PodFitsCurrentNode(pod, node) && node != nodeMap[pod.Spec.NodeName] {
			count++
		}
	}
	return count
}

// sortDomains sorts and splits the list of topology domains based on their size
// it also sorts the list of pods within the domains based on their node affinity/selector and priority in the following order:
// 1. non-evictable pods
// 2. pods with selectors or affinity
// 3. pods in descending priority
// 4. all other pods
// We then pop pods off the back of the list for eviction
func sortDomains(constraintTopologyPairs map[topologyPair][]*v1.Pod, isEvictable func(*v1.Pod) bool) []topology {
	sortedTopologies := make([]topology, 0, len(constraintTopologyPairs))
	// sort the topologies and return 2 lists: those <= the average and those > the average (> list inverted)
	for pair, list := range constraintTopologyPairs {
		// Sort the pods within the domain so that the lowest priority pods are considered first for eviction,
		// followed by the highest priority,
		// followed by the lowest priority pods with affinity or nodeSelector,
		// followed by the highest priority pods with affinity or nodeSelector
		sort.Slice(list, func(i, j int) bool {
			// any non-evictable pods should be considered last (ie, first in the list)
			if !isEvictable(list[i]) || !isEvictable(list[j]) {
				// if i is the only non-evictable pod, return true to put it first;
				// if j is the only non-evictable pod, return false to put j before i;
				// if both are non-evictable, the order doesn't matter
				return !(isEvictable(list[i]) && !isEvictable(list[j]))
			}

			// if both pods have the same selector/affinity status, compare them by their priority
			if hasSelectorOrAffinity(*list[i]) == hasSelectorOrAffinity(*list[j]) {
				return comparePodsByPriority(list[i], list[j])
			}
			return hasSelectorOrAffinity(*list[i]) && !hasSelectorOrAffinity(*list[j])
		})
		sortedTopologies = append(sortedTopologies, topology{pair: pair, pods: list})
	}

	// create an ascending slice of all key-value topology pairs
	sort.Slice(sortedTopologies, func(i, j int) bool {
		return len(sortedTopologies[i].pods) < len(sortedTopologies[j].pods)
	})

	return sortedTopologies
}

func hasSelectorOrAffinity(pod v1.Pod) bool {
	return pod.Spec.NodeSelector != nil || (pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil)
}

// comparePodsByPriority is a helper to the sort function to compare 2 pods based on their priority values
// It will sort the pods in DESCENDING order of priority, since in our logic we evict pods from the back
// of the list first.
func comparePodsByPriority(iPod, jPod *v1.Pod) bool {
	if iPod.Spec.Priority != nil && jPod.Spec.Priority != nil {
		// a LOWER priority value should be evicted FIRST
		return *iPod.Spec.Priority > *jPod.Spec.Priority
	} else if iPod.Spec.Priority != nil && jPod.Spec.Priority == nil {
		// i should come before j
		return true
	} else if iPod.Spec.Priority == nil && jPod.Spec.Priority != nil {
		// j should come before i
		return false
	} else {
		// it doesn't matter. just return true
		return true
	}
}
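
An illustrative ordering example (not from the commit; the pod names are invented):

// Given pods [lowPrio, highPrio, nonEvictable, lowPrioWithSelector], the
// comparator in sortDomains yields
// [nonEvictable, lowPrioWithSelector, highPrio, lowPrio], so eviction, which
// pops from the back, takes the low-priority unconstrained pod first.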

410 pkg/descheduler/strategies/topologyspreadconstraint_test.go Normal file
@@ -0,0 +1,410 @@
package strategies

import (
	"context"
	"fmt"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/test"
)

func TestTopologySpreadConstraint(t *testing.T) {
	ctx := context.Background()
	testCases := []struct {
		name                 string
		pods                 []*v1.Pod
		expectedEvictedCount int
		nodes                []*v1.Node
		strategy             api.DeschedulerStrategy
		namespaces           []string
	}{
		{
			name: "2 domains, sizes [2,1], maxSkew=1, move 0 pods",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
					constraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "zone",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
						},
					},
				},
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
				},
				{
					count:  1,
					node:   "n2",
					labels: map[string]string{"foo": "bar"},
				},
			}),
			expectedEvictedCount: 0,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
					constraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "zone",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
						},
					},
				},
				{
					count:  2,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
				},
				{
					count:  1,
					node:   "n2",
					labels: map[string]string{"foo": "bar"},
				},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2], exclude kube-system namespace",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
					constraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "zone",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
						},
					},
				},
				{
					count:  2,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
				},
				{
					count:  1,
					node:   "n2",
					labels: map[string]string{"foo": "bar"},
				},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{Enabled: true, Params: &api.StrategyParameters{Namespaces: &api.Namespaces{Exclude: []string{"kube-system"}}}},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [5,2], maxSkew=1, move 1 pod to achieve [4,3]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
					constraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "zone",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
						},
					},
				},
				{
					count:  4,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
				},
				{
					count:  2,
					node:   "n2",
					labels: map[string]string{"foo": "bar"},
				},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods to achieve [2,2]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
					constraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "zone",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
						},
					},
				},
				{
					count:  3,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
				},
			}),
			expectedEvictedCount: 2,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains, sizes [4,0], maxSkew=1, only move 1 pod since pods with nodeSelector and nodeAffinity aren't evicted",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
					constraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "zone",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
						},
					},
					nodeSelector: map[string]string{"zone": "zoneA"},
				},
				{
					count:        1,
					node:         "n1",
					labels:       map[string]string{"foo": "bar"},
					nodeSelector: map[string]string{"zone": "zoneA"},
				},
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
					nodeAffinity: &v1.Affinity{NodeAffinity: &v1.NodeAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
							{MatchExpressions: []v1.NodeSelectorRequirement{{Key: "foo", Values: []string{"bar"}, Operator: v1.NodeSelectorOpIn}}},
						}},
					}},
				},
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
				},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "3 domains, sizes [0, 1, 100], maxSkew=1, move 66 pods to get [34, 33, 34]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
				test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
			},
			pods: createTestPods([]testPodList{
				{
					count:  1,
					node:   "n2",
					labels: map[string]string{"foo": "bar"},
					constraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "zone",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
						},
					},
				},
				{
					count:  100,
					node:   "n3",
					labels: map[string]string{"foo": "bar"},
				},
			}),
			expectedEvictedCount: 66,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "4 domains, sizes [0, 1, 3, 5], should move 3 to get [2, 2, 3, 2]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
				test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
				test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneD" }),
			},
			pods: createTestPods([]testPodList{
				{
					count:  1,
					node:   "n2",
					labels: map[string]string{"foo": "bar"},
					constraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "zone",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
						},
					},
				},
				{
					count:  3,
					node:   "n3",
					labels: map[string]string{"foo": "bar"},
				},
				{
					count:  5,
					node:   "n4",
					labels: map[string]string{"foo": "bar"},
				},
			}),
			expectedEvictedCount: 3,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
		{
			name: "2 domains size [2 6], maxSkew=2, should move 1 to get [3 5]",
			nodes: []*v1.Node{
				test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
				test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
			},
			pods: createTestPods([]testPodList{
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
					constraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           2,
							TopologyKey:       "zone",
							WhenUnsatisfiable: v1.DoNotSchedule,
							LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
						},
					},
				},
				{
					count:  1,
					node:   "n1",
					labels: map[string]string{"foo": "bar"},
				},
				{
					count:  6,
					node:   "n2",
					labels: map[string]string{"foo": "bar"},
				},
			}),
			expectedEvictedCount: 1,
			strategy:             api.DeschedulerStrategy{},
			namespaces:           []string{"ns1"},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fakeClient := &fake.Clientset{}
			fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
				podList := make([]v1.Pod, 0, len(tc.pods))
				for _, pod := range tc.pods {
					podList = append(podList, *pod)
				}
				return true, &v1.PodList{Items: podList}, nil
			})
			fakeClient.Fake.AddReactor("list", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
				return true, &v1.NamespaceList{Items: []v1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "ns1", Namespace: "ns1"}}}}, nil
			})

			podEvictor := evictions.NewPodEvictor(
				fakeClient,
				"v1",
				false,
				100,
				tc.nodes,
				false,
			)
			RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
			podsEvicted := podEvictor.TotalEvicted()
			if podsEvicted != tc.expectedEvictedCount {
				t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
			}
		})
	}
}

type testPodList struct {
	count        int
	node         string
	labels       map[string]string
	constraints  []v1.TopologySpreadConstraint
	nodeSelector map[string]string
	nodeAffinity *v1.Affinity
}

func createTestPods(testPods []testPodList) []*v1.Pod {
	ownerRef1 := test.GetReplicaSetOwnerRefList()
	pods := make([]*v1.Pod, 0)
	podNum := 0
	for _, tp := range testPods {
		for i := 0; i < tp.count; i++ {
			pods = append(pods,
				test.BuildTestPod(fmt.Sprintf("pod-%d", podNum), 100, 0, tp.node, func(p *v1.Pod) {
					p.Labels = make(map[string]string)
					p.Labels = tp.labels
					p.Namespace = "ns1"
					p.ObjectMeta.OwnerReferences = ownerRef1
					p.Spec.TopologySpreadConstraints = tp.constraints
					p.Spec.NodeSelector = tp.nodeSelector
					p.Spec.Affinity = tp.nodeAffinity
				}))
			podNum++
		}
	}
	return pods
}

@@ -186,7 +186,7 @@ func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool {
		if len(pod.Spec.Tolerations) >= len(taintsForNode) && TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taintsForNode, nil) {
			return true
		}
		klog.V(5).Infof("pod: %#v doesn't tolerate node %s's taints", pod.Name, nodeName)
		klog.V(5).InfoS("Pod doesn't tolerate node's taints", "pod", klog.KObj(pod), "nodeName", nodeName)
	}

	return false

@@ -21,7 +21,7 @@ import (

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
	"k8s.io/component-helpers/scheduling/corev1"
	"k8s.io/klog/v2"
)

@@ -69,64 +69,17 @@ func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {

	// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
	if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
		nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
		klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
		return nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
		klog.V(10).InfoS("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector", "selector", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
		matches, err := corev1.MatchNodeSelectorTerms(node, nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
		if err != nil {
			klog.ErrorS(err, "error parsing node selector", "selector", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
		}
		return matches
		}
	}
	return true
}
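
For orientation (a hedged note, not from the commit; the pod/node variables are placeholders): corev1.MatchNodeSelectorTerms from k8s.io/component-helpers/scheduling/corev1 replaces the hand-rolled matching removed below, evaluating the ORed requiredDuringSchedulingIgnoredDuringExecution terms against the node:

// matches, err := corev1.MatchNodeSelectorTerms(node, pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)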

// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
	for _, req := range nodeSelectorTerms {
		nodeSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions)
		if err != nil {
			klog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
			return false
		}
		if nodeSelector.Matches(labels.Set(node.Labels)) {
			return true
		}
	}
	return false
}

// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
// labels.Selector.
func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) {
	if len(nsm) == 0 {
		return labels.Nothing(), nil
	}
	selector := labels.NewSelector()
	for _, expr := range nsm {
		var op selection.Operator
		switch expr.Operator {
		case v1.NodeSelectorOpIn:
			op = selection.In
		case v1.NodeSelectorOpNotIn:
			op = selection.NotIn
		case v1.NodeSelectorOpExists:
			op = selection.Exists
		case v1.NodeSelectorOpDoesNotExist:
			op = selection.DoesNotExist
		case v1.NodeSelectorOpGt:
			op = selection.GreaterThan
		case v1.NodeSelectorOpLt:
			op = selection.LessThan
		default:
			return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
		}
		r, err := labels.NewRequirement(expr.Key, op, expr.Values)
		if err != nil {
			return nil, err
		}
		selector = selector.Add(*r)
	}
	return selector, nil
}

// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations.
func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool {
	for i := range tolerations {

@@ -206,7 +206,7 @@ func runPodLifetimeStrategy(ctx context.Context, clientset clientset.Interface,
		deschedulerapi.DeschedulerStrategy{
			Enabled: true,
			Params: &deschedulerapi.StrategyParameters{
				MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds,
				PodLifeTime:                &deschedulerapi.PodLifeTime{MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds},
				Namespaces:                 namespaces,
				ThresholdPriority:          priority,
				ThresholdPriorityClassName: priorityClass,
@@ -567,7 +567,10 @@ func TestDeschedulingInterval(t *testing.T) {
	}

	// By default, the DeschedulingInterval param should be set to 0, meaning Descheduler only runs once then exits
	s := options.NewDeschedulerServer()
	s, err := options.NewDeschedulerServer()
	if err != nil {
		t.Fatalf("Unable to initialize server: %v", err)
	}
	s.Client = clientSet

	deschedulerPolicy := &api.DeschedulerPolicy{}

@@ -18,7 +18,7 @@
if [ -n "$KIND_E2E" ]; then
    K8S_VERSION=${KUBERNETES_VERSION:-v1.18.2}
    curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
    wget https://github.com/kubernetes-sigs/kind/releases/download/v0.8.1/kind-linux-amd64
    wget https://github.com/kubernetes-sigs/kind/releases/download/v0.9.0/kind-linux-amd64
    chmod +x kind-linux-amd64
    mv kind-linux-amd64 kind
    export PATH=$PATH:$PWD

12 vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json generated vendored
@@ -1,12 +0,0 @@
{
  "name": "metadata",
  "name_pretty": "Google Compute Engine Metadata API",
  "product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata",
  "client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata",
  "release_level": "ga",
  "language": "go",
  "repo": "googleapis/google-cloud-go",
  "distribution_name": "cloud.google.com/go/compute/metadata",
  "api_id": "compute:metadata",
  "requires_billing": false
}

41 vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored
@@ -61,25 +61,14 @@ var (
	instID = &cachedValue{k: "instance/id", trim: true}
)

var (
	defaultClient = &Client{hc: &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
			ResponseHeaderTimeout: 2 * time.Second,
		},
	}}
	subscribeClient = &Client{hc: &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		},
	}}
)
var defaultClient = &Client{hc: &http.Client{
	Transport: &http.Transport{
		Dial: (&net.Dialer{
			Timeout:   2 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
	},
}}

// NotDefinedError is returned when requested metadata is not defined.
//
@@ -206,10 +195,9 @@ func systemInfoSuggestsGCE() bool {
	return name == "Google" || name == "Google Compute Engine"
}

// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
// Subscribe calls Client.Subscribe on the default client.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	return subscribeClient.Subscribe(suffix, fn)
	return defaultClient.Subscribe(suffix, fn)
}

// Get calls Client.Get on the default client.

@@ -280,9 +268,14 @@ type Client struct {
	hc *http.Client
}

// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
// NewClient returns a Client that can be used to fetch metadata.
// Returns the client that uses the specified http.Client for HTTP requests.
// If nil is specified, returns the default client.
func NewClient(c *http.Client) *Client {
	if c == nil {
		return defaultClient
	}

	return &Client{hc: c}
}

32
vendor/github.com/Azure/go-autorest/.gitignore
generated
vendored
Normal file
32
vendor/github.com/Azure/go-autorest/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,32 @@
# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test
.DS_Store
.idea/
.vscode/

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

# go-autorest specific
vendor/
autorest/azure/example/example
1004
vendor/github.com/Azure/go-autorest/CHANGELOG.md
generated
vendored
Normal file
1004
vendor/github.com/Azure/go-autorest/CHANGELOG.md
generated
vendored
Normal file
File diff suppressed because it is too large
23
vendor/github.com/Azure/go-autorest/GNUmakefile
generated
vendored
Normal file
23
vendor/github.com/Azure/go-autorest/GNUmakefile
generated
vendored
Normal file
@@ -0,0 +1,23 @@
DIR?=./autorest/

default: build

build: fmt
go install $(DIR)

test:
go test $(DIR) || exit 1

vet:
@echo "go vet ."
@go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for review."; \
exit 1; \
fi

fmt:
gofmt -w $(DIR)

.PHONY: build test vet fmt
324
vendor/github.com/Azure/go-autorest/Gopkg.lock
generated
vendored
Normal file
324
vendor/github.com/Azure/go-autorest/Gopkg.lock
generated
vendored
Normal file
@@ -0,0 +1,324 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e"
name = "contrib.go.opencensus.io/exporter/ocagent"
packages = ["."]
pruneopts = "UT"
revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea"
version = "v0.6.0"

[[projects]]
digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20"
name = "github.com/census-instrumentation/opencensus-proto"
packages = [
"gen-go/agent/common/v1",
"gen-go/agent/metrics/v1",
"gen-go/agent/trace/v1",
"gen-go/metrics/v1",
"gen-go/resource/v1",
"gen-go/trace/v1",
]
pruneopts = "UT"
revision = "d89fa54de508111353cb0b06403c00569be780d8"
version = "v0.2.1"

[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"

[[projects]]
digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
pruneopts = "UT"
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"

[[projects]]
digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965"
name = "github.com/dimchansky/utfbom"
packages = ["."]
pruneopts = "UT"
revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c"
version = "v1.1.0"

[[projects]]
branch = "master"
digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
name = "github.com/golang/groupcache"
packages = ["lru"]
pruneopts = "UT"
revision = "611e8accdfc92c4187d399e95ce826046d4c8d73"

[[projects]]
digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa"
name = "github.com/golang/protobuf"
packages = [
"descriptor",
"jsonpb",
"proto",
"protoc-gen-go/descriptor",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/struct",
"ptypes/timestamp",
"ptypes/wrappers",
]
pruneopts = "UT"
revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
version = "v1.3.2"

[[projects]]
digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806"
name = "github.com/grpc-ecosystem/grpc-gateway"
packages = [
"internal",
"runtime",
"utilities",
]
pruneopts = "UT"
revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009"
version = "v1.12.1"

[[projects]]
digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = "UT"
revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
version = "v1.1.0"

[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"

[[projects]]
digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require",
]
pruneopts = "UT"
revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
version = "v1.4.0"

[[projects]]
digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71"
name = "go.opencensus.io"
packages = [
".",
"internal",
"internal/tagencoding",
"metric/metricdata",
"metric/metricproducer",
"plugin/ocgrpc",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
"plugin/ochttp/propagation/tracecontext",
"resource",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation",
"trace/tracestate",
]
pruneopts = "UT"
revision = "aad2c527c5defcf89b5afab7f37274304195a6b2"
version = "v0.22.2"

[[projects]]
branch = "master"
digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae"
name = "golang.org/x/crypto"
packages = [
"pkcs12",
"pkcs12/internal/rc2",
]
pruneopts = "UT"
revision = "e9b2fee46413994441b28dfca259d911d963dfed"

[[projects]]
branch = "master"
digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43"
name = "golang.org/x/lint"
packages = [
".",
"golint",
]
pruneopts = "UT"
revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448"

[[projects]]
branch = "master"
digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910"
name = "golang.org/x/net"
packages = [
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"trace",
]
pruneopts = "UT"
revision = "1ddd1de85cb0337b623b740a609d35817d516a8d"

[[projects]]
branch = "master"
digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b"
name = "golang.org/x/sync"
packages = ["semaphore"]
pruneopts = "UT"
revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb"

[[projects]]
branch = "master"
digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = "UT"
revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945"

[[projects]]
digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/language",
"internal/language/compact",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "UT"
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"

[[projects]]
branch = "master"
digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7"
name = "golang.org/x/tools"
packages = [
"go/ast/astutil",
"go/gcexportdata",
"go/internal/gcimporter",
"go/types/typeutil",
]
pruneopts = "UT"
revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42"

[[projects]]
digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877"
name = "google.golang.org/api"
packages = ["support/bundler"]
pruneopts = "UT"
revision = "8a410c21381766a810817fd6200fce8838ecb277"
version = "v0.14.0"

[[projects]]
branch = "master"
digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/httpbody",
"googleapis/rpc/status",
"protobuf/field_mask",
]
pruneopts = "UT"
revision = "51378566eb590fa106d1025ea12835a4416dda84"

[[projects]]
digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301"
name = "google.golang.org/grpc"
packages = [
".",
"backoff",
"balancer",
"balancer/base",
"balancer/roundrobin",
"binarylog/grpc_binarylog_v1",
"codes",
"connectivity",
"credentials",
"credentials/internal",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/balancerload",
"internal/binarylog",
"internal/buffer",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/grpcsync",
"internal/resolver/dns",
"internal/resolver/passthrough",
"internal/syscall",
"internal/transport",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"serviceconfig",
"stats",
"status",
"tap",
]
pruneopts = "UT"
revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514"
version = "v1.25.1"

[[projects]]
digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce"
version = "v2.2.7"

[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"contrib.go.opencensus.io/exporter/ocagent",
"github.com/dgrijalva/jwt-go",
"github.com/dimchansky/utfbom",
"github.com/mitchellh/go-homedir",
"github.com/stretchr/testify/require",
"go.opencensus.io/plugin/ochttp",
"go.opencensus.io/plugin/ochttp/propagation/tracecontext",
"go.opencensus.io/stats/view",
"go.opencensus.io/trace",
"golang.org/x/crypto/pkcs12",
"golang.org/x/lint/golint",
]
solver-name = "gps-cdcl"
solver-version = 1
59
vendor/github.com/Azure/go-autorest/Gopkg.toml
generated
vendored
Normal file
59
vendor/github.com/Azure/go-autorest/Gopkg.toml
generated
vendored
Normal file
@@ -0,0 +1,59 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true

required = ["golang.org/x/lint/golint"]

[prune]
go-tests = true
unused-packages = true

[[constraint]]
name = "contrib.go.opencensus.io/exporter/ocagent"
version = "0.6.0"

[[constraint]]
name = "github.com/dgrijalva/jwt-go"
version = "3.2.0"

[[constraint]]
name = "github.com/dimchansky/utfbom"
version = "1.1.0"

[[constraint]]
name = "github.com/mitchellh/go-homedir"
version = "1.1.0"

[[constraint]]
name = "github.com/stretchr/testify"
version = "1.3.0"

[[constraint]]
name = "go.opencensus.io"
version = "0.22.0"

[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
191
vendor/github.com/Azure/go-autorest/LICENSE
generated
vendored
Normal file
191
vendor/github.com/Azure/go-autorest/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,191 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright 2015 Microsoft Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
165
vendor/github.com/Azure/go-autorest/README.md
generated
vendored
Normal file
165
vendor/github.com/Azure/go-autorest/README.md
generated
vendored
Normal file
@@ -0,0 +1,165 @@
# go-autorest

[](https://godoc.org/github.com/Azure/go-autorest/autorest)
[](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master)
[](https://goreportcard.com/report/Azure/go-autorest)

Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages.

An authentication client tested with Azure Active Directory (AAD) is also
provided in this repo in the package
`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package
is maintained only as part of the Azure Go SDK and is not related to other
"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD).

## Overview

Package go-autorest implements an HTTP request pipeline suitable for use across
multiple goroutines and provides the shared routines used by packages generated
by [Autorest](https://github.com/Azure/autorest.go).

The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
and Responding. A typical pattern is:

```go
req, err := Prepare(&http.Request{},
token.WithAuthorization())

resp, err := Send(req,
WithLogging(logger),
DoErrorIfStatusCode(http.StatusInternalServerError),
DoCloseIfError(),
DoRetryForAttempts(5, time.Second))

err = Respond(resp,
ByDiscardingBody(),
ByClosing())
```

Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
and then pass the data along, pass the data first and then modify the result, or wrap themselves
around passing the data (such as a logger might do). Decorators run in the order provided. For
example, the following:

```go
req, err := Prepare(&http.Request{},
WithBaseURL("https://microsoft.com/"),
WithPath("a"),
WithPath("b"),
WithPath("c"))
```

will set the URL to:

```
https://microsoft.com/a/b/c
```

Preparers and Responders may be shared and re-used (assuming the underlying decorators support
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
all bound together by means of input / output channels.

Decorators hold their passed state within a closure (such as the path components in the example
above). Be careful to share Preparers and Responders only in a context where such held state
applies. For example, it may not make sense to share a Preparer that applies a query string from a
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
struct (e.g., `ByUnmarshallingJson`) is likely incorrect.

Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.

See the included examples for more detail. For details on the suggested use of this package by
generated clients, see the Client described below.

## Helpers

### Handling Swagger Dates

The Swagger specification (https://swagger.io) that drives AutoRest
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
parsing and formatting.

### Handling Empty Values

In JSON, missing values have different semantics than empty values. This is especially true for
services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
only those values to modify. Missing values are to be left unchanged. Developers, then, require a
means to both specify an empty value and to leave the value out of the submitted JSON.

The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
the Go base types encoded through the default JSON package, it is not possible to create JSON to
clear a value at the server.

The workaround within the Go community is to use pointers to base types in lieu of base types within
structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
`*string`. While this enables distinguishing empty values from those to be unchanged, creating
pointers to a base type (notably constant, in-line values) requires additional variables. This, for
example,

```go
s := struct {
S *string
}{ S: &"foo" }
```
fails, while, this

```go
v := "foo"
s := struct {
S *string
}{ S: &v }
```
succeeds.

To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
Go base types which have Swagger analogs. It also provides a helper that converts between
`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
associated with a key should be cleared. With the helpers, the previous example becomes

```go
s := struct {
S *string
}{ S: to.StringPtr("foo") }
```

## Install

```bash
go get github.com/Azure/go-autorest/autorest
go get github.com/Azure/go-autorest/autorest/azure
go get github.com/Azure/go-autorest/autorest/date
go get github.com/Azure/go-autorest/autorest/to
```

### Using with Go Modules
In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.

- autorest/adal
- autorest/azure/auth
- autorest/azure/cli
- autorest/date
- autorest/mocks
- autorest/to
- autorest/validation
- autorest
- logger
- tracing

Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.

## License

See LICENSE file.

-----

This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.
4
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
generated
vendored
4
vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
generated
vendored
@@ -222,6 +222,10 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code
case "code_expired":
return nil, ErrDeviceCodeExpired
default:
// return a more meaningful error message if available
if token.ErrorDescription != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription)
}
return nil, ErrDeviceGeneric
}
}
12
vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
generated
vendored
12
vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
generated
vendored
@@ -3,10 +3,10 @@ module github.com/Azure/go-autorest/autorest/adal
go 1.12

require (
github.com/Azure/go-autorest/autorest v0.9.0
github.com/Azure/go-autorest/autorest/date v0.2.0
github.com/Azure/go-autorest/autorest/mocks v0.3.0
github.com/Azure/go-autorest/tracing v0.5.0
github.com/dgrijalva/jwt-go v3.2.0+incompatible
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413
github.com/Azure/go-autorest v14.2.0+incompatible
github.com/Azure/go-autorest/autorest/date v0.3.0
github.com/Azure/go-autorest/autorest/mocks v0.4.1
github.com/Azure/go-autorest/tracing v0.6.0
github.com/form3tech-oss/jwt-go v3.2.2+incompatible
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
)
33
vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
generated
vendored
33
vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
generated
vendored
@@ -1,26 +1,17 @@
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
4
vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
generated
vendored
4
vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
generated
vendored
@@ -16,9 +16,9 @@ package adal
// See the License for the specific language governing permissions and
// limitations under the License.

// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.

// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "github.com/Azure/go-autorest/autorest"
import _ "github.com/Azure/go-autorest"
62
vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
generated
vendored
62
vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
generated
vendored
@@ -15,11 +15,24 @@ package adal
// limitations under the License.

import (
"crypto/rsa"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"

"golang.org/x/crypto/pkcs12"
)

var (
// ErrMissingCertificate is returned when no local certificate is found in the provided PFX data.
ErrMissingCertificate = errors.New("adal: certificate missing")

// ErrMissingPrivateKey is returned when no private key is found in the provided PFX data.
ErrMissingPrivateKey = errors.New("adal: private key missing")
)

// LoadToken restores a Token object from a file located at 'path'.
@@ -71,3 +84,52 @@ func SaveToken(path string, mode os.FileMode, token Token) error {
}
return nil
}

// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
// The PFX data must contain a private key along with a certificate whose public key matches that of the
// private key or an error is returned.
// If the private key is not password protected pass the empty string for password.
func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
blocks, err := pkcs12.ToPEM(pfxData, password)
if err != nil {
return nil, nil, err
}
// first extract the private key
var priv *rsa.PrivateKey
for _, block := range blocks {
if block.Type == "PRIVATE KEY" {
priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return nil, nil, err
}
break
}
}
if priv == nil {
return nil, nil, ErrMissingPrivateKey
}
// now find the certificate with the matching public key of our private key
var cert *x509.Certificate
for _, block := range blocks {
if block.Type == "CERTIFICATE" {
pcert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, nil, err
}
certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
if !ok {
// keep looking
continue
}
if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
// found a match
cert = pcert
break
}
}
}
if cert == nil {
return nil, nil, ErrMissingCertificate
}
return cert, priv, nil
}
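For context, a minimal usage sketch of the DecodePfxCertificateData helper added above (illustrative only, not part of this diff; the PFX file path is a placeholder):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// "sp.pfx" is a hypothetical path; the empty-password convention comes
	// from the doc comment on DecodePfxCertificateData in the diff above.
	pfxData, err := ioutil.ReadFile("sp.pfx")
	if err != nil {
		log.Fatal(err)
	}
	cert, key, err := adal.DecodePfxCertificateData(pfxData, "")
	if err != nil {
		// e.g. adal.ErrMissingPrivateKey or adal.ErrMissingCertificate
		log.Fatal(err)
	}
	fmt.Printf("cert subject: %s, key size: %d bits\n", cert.Subject, key.N.BitLen())
}
```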
87
vendor/github.com/Azure/go-autorest/autorest/adal/token.go
generated
vendored
87
vendor/github.com/Azure/go-autorest/autorest/adal/token.go
generated
vendored
@@ -35,7 +35,7 @@ import (
"time"

"github.com/Azure/go-autorest/autorest/date"
"github.com/dgrijalva/jwt-go"
"github.com/form3tech-oss/jwt-go"
)

const (
@@ -62,6 +62,9 @@ const (
// msiEndpoint is the well known endpoint for getting MSI authentications tokens
msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"

// the API version to use for the MSI endpoint
msiAPIVersion = "2018-02-01"

// the default number of attempts to refresh an MSI authentication token
defaultMaxMSIRefreshAttempts = 5

@@ -70,6 +73,9 @@ const (

// asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions
asMSISecretEnv = "MSI_SECRET"

// the API version to use for the App Service MSI endpoint
appServiceAPIVersion = "2017-09-01"
)

// OAuthTokenProvider is an interface which should be implemented by an access token retriever
@@ -354,6 +360,7 @@ type ServicePrincipalToken struct {
customRefreshFunc TokenRefresh
refreshCallbacks []TokenRefreshCallback
// MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
// Settings this to a value less than 1 will use the default value.
MaxMSIRefreshAttempts int
}

@@ -650,6 +657,8 @@ func GetMSIVMEndpoint() (string, error) {
return msiEndpoint, nil
}

// NOTE: this only indicates if the ASE environment credentials have been set
// which does not necessarily mean that the caller is authenticating via ASE!
func isAppService() bool {
_, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
_, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv)
@@ -678,16 +687,22 @@ func GetMSIEndpoint() (string, error) {
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the system assigned identity when creating the token.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, nil, callbacks...)
}

// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the specified user assigned identity when creating the token.
// It will use the clientID of specified user assigned identity when creating the token.
func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, nil, callbacks...)
}

func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the azure resource id of user assigned identity when creating the token.
func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, &identityResourceID, callbacks...)
}

func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, identityResourceID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
return nil, err
}
@@ -699,6 +714,11 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
return nil, err
}
}
if identityResourceID != nil {
if err := validateStringParam(*identityResourceID, "identityResourceID"); err != nil {
return nil, err
}
}
// We set the oauth config token endpoint to be MSI's endpoint
msiEndpointURL, err := url.Parse(msiEndpoint)
if err != nil {
@@ -709,13 +729,16 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
v.Set("resource", resource)
// App Service MSI currently only supports token API version 2017-09-01
if isAppService() {
v.Set("api-version", "2017-09-01")
v.Set("api-version", appServiceAPIVersion)
} else {
v.Set("api-version", "2018-02-01")
v.Set("api-version", msiAPIVersion)
}
if userAssignedID != nil {
v.Set("client_id", *userAssignedID)
}
if identityResourceID != nil {
v.Set("mi_res_id", *identityResourceID)
}
msiEndpointURL.RawQuery = v.Encode()

spt := &ServicePrincipalToken{
@@ -771,8 +794,9 @@ func (spt *ServicePrincipalToken) EnsureFresh() error {
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
// take the write lock then check to see if the token was already refreshed
// must take the read lock when initially checking the token's expiration
if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) {
// take the write lock then check again to see if the token was already refreshed
spt.refreshLock.Lock()
defer spt.refreshLock.Unlock()
if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
@@ -835,11 +859,28 @@ func (spt *ServicePrincipalToken) getGrantType() string {
}

func isIMDS(u url.URL) bool {
imds, err := url.Parse(msiEndpoint)
return isMSIEndpoint(u) == true || isASEEndpoint(u) == true
}

func isMSIEndpoint(endpoint url.URL) bool {
msi, err := url.Parse(msiEndpoint)
if err != nil {
return false
}
return (u.Host == imds.Host && u.Path == imds.Path) || isAppService()
return endpoint.Host == msi.Host && endpoint.Path == msi.Path
}

func isASEEndpoint(endpoint url.URL) bool {
aseEndpoint, err := GetMSIAppServiceEndpoint()
if err != nil {
// app service environment isn't enabled
return false
}
ase, err := url.Parse(aseEndpoint)
if err != nil {
return false
}
return endpoint.Host == ase.Host && endpoint.Path == ase.Path
}

func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
@@ -858,7 +899,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
}
req.Header.Add("User-Agent", UserAgent())
// Add header when runtime is on App Service or Functions
if isAppService() {
if isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
asMSISecret, _ := os.LookupEnv(asMSISecretEnv)
req.Header.Add("Secret", asMSISecret)
}
@@ -900,6 +941,14 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
}

var resp *http.Response
if isMSIEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
resp, err = getMSIEndpoint(ctx, spt.sender)
if err != nil {
// return a TokenRefreshError here so that we don't keep retrying
return newTokenRefreshError(fmt.Sprintf("the MSI endpoint is not available. Failed HTTP request to MSI endpoint: %v", err), nil)
}
resp.Body.Close()
}
if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
} else {
@@ -972,6 +1021,11 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http
attempt := 0
delay := time.Duration(0)

// maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made
if maxAttempts < 1 {
maxAttempts = defaultMaxMSIRefreshAttempts
}

for attempt < maxAttempts {
if resp != nil && resp.Body != nil {
io.Copy(ioutil.Discard, resp.Body)
@@ -1133,3 +1187,12 @@ func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig,
}
return &m, nil
}

// MSIAvailable returns true if the MSI endpoint is available for authentication.
func MSIAvailable(ctx context.Context, sender Sender) bool {
resp, err := getMSIEndpoint(ctx, sender)
if err == nil {
resp.Body.Close()
}
return err == nil
}
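A minimal sketch of calling the MSIAvailable helper added above (illustrative only, not part of this diff; the timeout value is an assumption):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// MSIAvailable probes the IMDS endpoint with a short internal timeout;
	// *http.Client satisfies the adal.Sender interface.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if adal.MSIAvailable(ctx, &http.Client{}) {
		fmt.Println("MSI endpoint is available")
	} else {
		fmt.Println("MSI endpoint is not available")
	}
}
```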
36
vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go
generated
vendored
Normal file
36
vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
// +build go1.13

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package adal

import (
"context"
"net/http"
"time"
)

func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
// this cannot fail, the return sig is due to legacy reasons
msiEndpoint, _ := GetMSIVMEndpoint()
tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
// http.NewRequestWithContext() was added in Go 1.13
req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil)
q := req.URL.Query()
q.Add("api-version", msiAPIVersion)
req.URL.RawQuery = q.Encode()
return sender.Do(req)
}
36
vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go
generated
vendored
Normal file
36
vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
// +build !go1.13

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package adal

import (
"context"
"net/http"
"time"
)

func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
// this cannot fail, the return sig is due to legacy reasons
msiEndpoint, _ := GetMSIVMEndpoint()
tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil)
req = req.WithContext(tempCtx)
q := req.URL.Query()
q.Add("api-version", msiAPIVersion)
req.URL.RawQuery = q.Encode()
return sender.Do(req)
}
7
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
7
vendor/github.com/Azure/go-autorest/autorest/authorization.go
generated
vendored
@@ -138,6 +138,11 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
}
|
||||
}
|
||||
|
||||
// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST.
|
||||
func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider {
|
||||
return ba.tokenProvider
|
||||
}
|
||||
|
||||
// BearerAuthorizerCallbackFunc is the authentication callback signature.
|
||||
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
|
||||
|
||||
@@ -331,7 +336,7 @@ func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
			for i := range auxTokens {
				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
			}
			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", ")))
		})
	}
}
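The new TokenProvider accessor exposes the authorizer's underlying token source for use outside of REST request decoration. A rough sketch of wiring one up (tenant, client, and secret values are placeholders):

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	env := azure.PublicCloud
	oauthCfg, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, "my-tenant-id")
	if err != nil {
		panic(err)
	}
	spt, err := adal.NewServicePrincipalToken(*oauthCfg, "my-client-id", "my-secret", env.ResourceManagerEndpoint)
	if err != nil {
		panic(err)
	}
	ba := autorest.NewBearerAuthorizer(spt)
	// The accessor returns the adal.OAuthTokenProvider backing the authorizer.
	fmt.Printf("token provider: %T\n", ba.TokenProvider())
}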
8 vendor/github.com/Azure/go-autorest/autorest/azure/async.go generated vendored
@@ -167,7 +167,13 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
		cancelCtx, cancel = context.WithTimeout(ctx, d)
		defer cancel()
	}

	// if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll
	if delay, ok := f.GetPollingDelay(); ok {
		if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed {
			err = cancelCtx.Err()
			return
		}
	}

	done, err := f.DoneWithContext(ctx, client)
	for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
		if attempts >= client.RetryAttempts {
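The new guard honors an initial Retry-After header before the first poll, reusing the same cancellable sleep the retry loop uses: DelayForBackoff blocks for backoff * 2^attempt and reports false if the cancel channel fires first. A small illustration of that helper (durations are arbitrary):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// attempt 0 => sleep 1s * 2^0 = 1s, unless ctx is done first.
	if !autorest.DelayForBackoff(time.Second, 0, ctx.Done()) {
		fmt.Println("canceled while waiting:", ctx.Err())
		return
	}
	fmt.Println("waited the full backoff")
}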
9 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go generated vendored
@@ -127,7 +127,7 @@ var (
	KeyVaultDNSSuffix:            "vault.usgovcloudapi.net",
	ServiceBusEndpointSuffix:     "servicebus.usgovcloudapi.net",
	ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
	ResourceManagerVMDNSSuffix:   "cloudapp.windowsazure.us",
	ResourceManagerVMDNSSuffix:   "cloudapp.usgovcloudapi.net",
	ContainerRegistryDNSSuffix:   "azurecr.us",
	CosmosDBDNSSuffix:            "documents.azure.us",
	TokenAudience:                "https://management.usgovcloudapi.net/",
@@ -160,7 +160,7 @@ var (
	KeyVaultDNSSuffix:            "vault.azure.cn",
	ServiceBusEndpointSuffix:     "servicebus.chinacloudapi.cn",
	ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
	ResourceManagerVMDNSSuffix:   "cloudapp.azure.cn",
	ResourceManagerVMDNSSuffix:   "cloudapp.chinacloudapi.cn",
	ContainerRegistryDNSSuffix:   "azurecr.cn",
	CosmosDBDNSSuffix:            "documents.azure.cn",
	TokenAudience:                "https://management.chinacloudapi.cn/",
@@ -242,3 +242,8 @@ func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
	return
}

// SetEnvironment updates the environment map with the specified values.
func SetEnvironment(name string, env Environment) {
	environments[strings.ToUpper(name)] = env
}
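SetEnvironment makes it possible to register a custom cloud definition (or override a built-in one) so that later lookups resolve to it. A minimal sketch, assuming the package's existing EnvironmentFromName lookup; the "MYSTACK" name and endpoint are placeholders:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Start from a built-in cloud and override only what differs.
	custom := azure.PublicCloud
	custom.Name = "MYSTACK"
	custom.ResourceManagerEndpoint = "https://management.mystack.example/"

	// Names are upper-cased on insert, matching the lookup's normalization.
	azure.SetEnvironment("MYSTACK", custom)

	env, err := azure.EnvironmentFromName("mystack")
	if err != nil {
		panic(err)
	}
	fmt.Println(env.ResourceManagerEndpoint)
}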
2 vendor/github.com/Azure/go-autorest/autorest/date/go.mod generated vendored
@@ -2,4 +2,4 @@ module github.com/Azure/go-autorest/autorest/date
go 1.12

require github.com/Azure/go-autorest/autorest v0.9.0
require github.com/Azure/go-autorest v14.2.0+incompatible
18 vendor/github.com/Azure/go-autorest/autorest/date/go.sum generated vendored
@@ -1,16 +1,2 @@
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
4 vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go generated vendored
@@ -16,9 +16,9 @@ package date
// See the License for the specific language governing permissions and
// limitations under the License.

// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.

// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "github.com/Azure/go-autorest/autorest"
import _ "github.com/Azure/go-autorest"
11 vendor/github.com/Azure/go-autorest/autorest/go.mod generated vendored
@@ -3,9 +3,10 @@ module github.com/Azure/go-autorest/autorest
go 1.12

require (
	github.com/Azure/go-autorest/autorest/adal v0.8.2
	github.com/Azure/go-autorest/autorest/mocks v0.3.0
	github.com/Azure/go-autorest/logger v0.1.0
	github.com/Azure/go-autorest/tracing v0.5.0
	golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413
	github.com/Azure/go-autorest v14.2.0+incompatible
	github.com/Azure/go-autorest/autorest/adal v0.9.0
	github.com/Azure/go-autorest/autorest/mocks v0.4.0
	github.com/Azure/go-autorest/logger v0.2.0
	github.com/Azure/go-autorest/tracing v0.6.0
	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
)
35 vendor/github.com/Azure/go-autorest/autorest/go.sum generated vendored
@@ -1,28 +1,21 @@
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
24 vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go generated vendored Normal file
@@ -0,0 +1,24 @@
// +build modhack

package autorest

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.

// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "github.com/Azure/go-autorest"
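These "modhack" files are the documented trick for a repository whose root is itself a module containing sub-modules: a file guarded by an otherwise-unused build tag blank-imports the parent module, which keeps the parent in the sub-module's go.mod under go mod tidy while the import never reaches a real build. The same pattern for a hypothetical repository (module paths are illustrative):

// +build modhack

package mysubmodule

// Never compiled in practice: the modhack tag is never set. The blank import
// exists only so go mod tidy retains example.com/myrepo in this sub-module's go.mod.
import _ "example.com/myrepo"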
19 vendor/github.com/Azure/go-autorest/autorest/sender.go generated vendored
@@ -257,6 +257,12 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
	}
}

// Count429AsRetry indicates that a 429 response should be included as a retry attempt.
var Count429AsRetry = true

// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received.
var Max429Delay time.Duration

// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
// number of attempts, exponentially backing off between requests using the supplied backoff
// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request.
@@ -264,7 +270,7 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
	return func(s Sender) Sender {
		return SenderFunc(func(r *http.Request) (*http.Response, error) {
			return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...)
			return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...)
		})
	}
}
@@ -276,7 +282,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
	return func(s Sender) Sender {
		return SenderFunc(func(r *http.Request) (*http.Response, error) {
			return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...)
			return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...)
		})
	}
}
@@ -297,11 +303,10 @@ func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempt
		return resp, err
	}
	delayed := DelayWithRetryAfter(resp, r.Context().Done())
	// enforce a 2 minute cap between requests when 429 status codes are
	// not going to be counted as an attempt and when the cap is 0.
	// this should only happen in the absence of a retry-after header.
	if !count429 && cap == 0 {
		cap = 2 * time.Minute
	// if this was a 429 set the delay cap as specified.
	// applicable only in the absence of a retry-after header.
	if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
		cap = Max429Delay
	}
	if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) {
		return resp, r.Context().Err()
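Count429AsRetry and Max429Delay are package-level knobs, so throttling behavior is tuned once at startup rather than per request. A minimal sketch of setting them and building a retrying sender (the target URL and status-code list are placeholders):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Don't let throttling responses consume the retry budget...
	autorest.Count429AsRetry = false
	// ...but never wait more than a minute on a 429 that lacks Retry-After.
	autorest.Max429Delay = time.Minute

	sender := autorest.DecorateSender(http.DefaultClient,
		autorest.DoRetryForStatusCodes(3, 2*time.Second,
			http.StatusTooManyRequests, http.StatusBadGateway))

	req, err := http.NewRequest(http.MethodGet, "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	resp, err := sender.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}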
2 vendor/github.com/Azure/go-autorest/autorest/version.go generated vendored
@@ -19,7 +19,7 @@ import (
	"runtime"
)

const number = "v13.4.0"
const number = "v14.2.1"

var (
	userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
105 vendor/github.com/Azure/go-autorest/azure-pipelines.yml generated vendored Normal file
@@ -0,0 +1,105 @@
variables:
  GOPATH: '$(system.defaultWorkingDirectory)/work'
  sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)'

jobs:
  - job: 'goautorest'
    displayName: 'Run go-autorest CI Checks'

    strategy:
      matrix:
        Linux_Go113:
          vm.image: 'ubuntu-18.04'
          go.version: '1.13'
        Linux_Go114:
          vm.image: 'ubuntu-18.04'
          go.version: '1.14'

    pool:
      vmImage: '$(vm.image)'

    steps:
    - task: GoTool@0
      inputs:
        version: '$(go.version)'
      displayName: "Select Go Version"

    - script: |
        set -e
        mkdir -p '$(GOPATH)/bin'
        mkdir -p '$(sdkPath)'
        shopt -s extglob
        mv !(work) '$(sdkPath)'
        echo '##vso[task.prependpath]$(GOPATH)/bin'
      displayName: 'Create Go Workspace'

    - script: |
        set -e
        curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
        dep ensure -v
        go install ./vendor/golang.org/x/lint/golint
        go get github.com/jstemmer/go-junit-report
        go get github.com/axw/gocov/gocov
        go get github.com/AlekSi/gocov-xml
        go get -u github.com/matm/gocov-html
      workingDirectory: '$(sdkPath)'
      displayName: 'Install Dependencies'

    - script: |
        go vet ./autorest/...
        go vet ./logger/...
        go vet ./tracing/...
      workingDirectory: '$(sdkPath)'
      displayName: 'Vet'

    - script: |
        go build -v ./autorest/...
        go build -v ./logger/...
        go build -v ./tracing/...
      workingDirectory: '$(sdkPath)'
      displayName: 'Build'

    - script: |
        set -e
        go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml
        gocov convert coverage.txt > coverage.json
        gocov-xml < coverage.json > coverage.xml
        gocov-html < coverage.json > coverage.html
      workingDirectory: '$(sdkPath)'
      displayName: 'Run Tests'

    - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2
      workingDirectory: '$(sdkPath)'
      displayName: 'Copyright Header Check'
      failOnStderr: true
      condition: succeededOrFailed()

    - script: |
        gofmt -s -l -w ./autorest/. >&2
        gofmt -s -l -w ./logger/. >&2
        gofmt -s -l -w ./tracing/. >&2
      workingDirectory: '$(sdkPath)'
      displayName: 'Format Check'
      failOnStderr: true
      condition: succeededOrFailed()

    - script: |
        golint ./autorest/... >&2
        golint ./logger/... >&2
        golint ./tracing/... >&2
      workingDirectory: '$(sdkPath)'
      displayName: 'Linter Check'
      failOnStderr: true
      condition: succeededOrFailed()

    - task: PublishTestResults@2
      inputs:
        testRunner: JUnit
        testResultsFiles: $(sdkPath)/report.xml
        failTaskOnFailedTests: true

    - task: PublishCodeCoverageResults@1
      inputs:
        codeCoverageTool: Cobertura
        summaryFileLocation: $(sdkPath)/coverage.xml
        additionalCodeCoverageFiles: $(sdkPath)/coverage.html
18 vendor/github.com/Azure/go-autorest/doc.go generated vendored Normal file
@@ -0,0 +1,18 @@
/*
Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages.
*/
package go_autorest

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
2 vendor/github.com/Azure/go-autorest/logger/go.mod generated vendored
@@ -1,3 +1,5 @@
module github.com/Azure/go-autorest/logger

go 1.12

require github.com/Azure/go-autorest v14.2.0+incompatible
2 vendor/github.com/Azure/go-autorest/logger/go.sum generated vendored Normal file
@@ -0,0 +1,2 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
24 vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go generated vendored Normal file
@@ -0,0 +1,24 @@
// +build modhack

package logger

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.

// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "github.com/Azure/go-autorest"
2 vendor/github.com/Azure/go-autorest/tracing/go.mod generated vendored
@@ -1,3 +1,5 @@
module github.com/Azure/go-autorest/tracing

go 1.12

require github.com/Azure/go-autorest v14.2.0+incompatible
2 vendor/github.com/Azure/go-autorest/tracing/go.sum generated vendored Normal file
@@ -0,0 +1,2 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
24 vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go generated vendored Normal file
@@ -0,0 +1,24 @@
// +build modhack

package tracing

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.

// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "github.com/Azure/go-autorest"
34 vendor/github.com/client9/misspell/.gitignore generated vendored Normal file
@@ -0,0 +1,34 @@
dist/
bin/
vendor/

# editor turds
*~
*.gz
*.bz2
*.csv

# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
20 vendor/github.com/client9/misspell/.travis.yml generated vendored Normal file
@@ -0,0 +1,20 @@
sudo: required
dist: trusty
group: edge
language: go
go:
  - "1.10"
git:
  depth: 1

script:
  - ./scripts/travis.sh

# calls goreleaser when a new tag is pushed
deploy:
- provider: script
  skip_cleanup: true
  script: curl -sL http://git.io/goreleaser | bash
  on:
    tags: true
    condition: $TRAVIS_OS_NAME = linux
37 vendor/github.com/client9/misspell/Dockerfile generated vendored Normal file
@@ -0,0 +1,37 @@
FROM golang:1.10.0-alpine

# cache buster
RUN echo 4

# git is needed for "go get" below
RUN apk add --no-cache git make

# these are my standard testing / linting tools
RUN /bin/true \
    && go get -u github.com/golang/dep/cmd/dep \
    && go get -u github.com/alecthomas/gometalinter \
    && gometalinter --install \
    && rm -rf /go/src /go/pkg
#
# * SCOWL word list
#
# Downloads
#  http://wordlist.aspell.net/dicts/
#  --> http://app.aspell.net/create
#

# use en_US large size
# use regular size for others
ENV SOURCE_US_BIG http://app.aspell.net/create?max_size=70&spelling=US&max_variant=2&diacritic=both&special=hacker&special=roman-numerals&download=wordlist&encoding=utf-8&format=inline

# should be able tell difference between English variations using this
ENV SOURCE_US http://app.aspell.net/create?max_size=60&spelling=US&max_variant=1&diacritic=both&download=wordlist&encoding=utf-8&format=inline
ENV SOURCE_GB_ISE http://app.aspell.net/create?max_size=60&spelling=GBs&max_variant=2&diacritic=both&download=wordlist&encoding=utf-8&format=inline
ENV SOURCE_GB_IZE http://app.aspell.net/create?max_size=60&spelling=GBz&max_variant=2&diacritic=both&download=wordlist&encoding=utf-8&format=inline
ENV SOURCE_CA http://app.aspell.net/create?max_size=60&spelling=CA&max_variant=2&diacritic=both&download=wordlist&encoding=utf-8&format=inline

RUN /bin/true \
    && mkdir /scowl-wl \
    && wget -O /scowl-wl/words-US-60.txt ${SOURCE_US} \
    && wget -O /scowl-wl/words-GB-ise-60.txt ${SOURCE_GB_ISE}
Some files were not shown because too many files have changed in this diff