Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00.
Compare commits
174 Commits
| SHA1 |
|---|
| 0ec8581964 |
| 2f7544344b |
| b08f1fa8b5 |
| fca4a0970f |
| fda4c96937 |
| 3ef05e9e7f |
| 4bbafe7c19 |
| 77ec804529 |
| 25b9edae3c |
| 8da68695e1 |
| b95380641f |
| 10d0ce0dfa |
| 2c06a33d41 |
| 4bd348d9b7 |
| 82559025b1 |
| a2c88582fa |
| a321a38328 |
| 970b35d737 |
| 364f467421 |
| dd94f2ed93 |
| 7f20b5c891 |
| e63e159c04 |
| b7697869f2 |
| 19ced3d630 |
| 82af9c6321 |
| 37df42df7c |
| ed1efe436f |
| 749e81c51c |
| dc2cf723bc |
| d7c12c5f00 |
| a5f322521e |
| 52d226321b |
| bb5930eb21 |
| 6c865fdf32 |
| af1ffe7a15 |
| 97c0044a74 |
| c7f8670b11 |
| 99472b6223 |
| 492da1b8a9 |
| 70f3619fad |
| 0d096edbee |
| e7980442ef |
| c2cf78a760 |
| 5092595384 |
| d513c5d9d7 |
| 4d47bd7f36 |
| 96657caf60 |
| 3cf2c03f69 |
| 1715f4ff81 |
| ae7467fb27 |
| 3a631a3287 |
| 4ee0740a13 |
| 992483d0fe |
| 8dc250892b |
| 08356fc9fa |
| e5e931d333 |
| 835c6c9fa1 |
| 8a06ed32e2 |
| 14b67000d6 |
| dbfcd19713 |
| f598b8909a |
| e6c9dfa05d |
| f0f7ebbe9a |
| 4d37ec91a2 |
| 0286871a56 |
| b2965c0724 |
| 89c7ddbaab |
| 308309d6d2 |
| c8d4658277 |
| 6f0cd50dc0 |
| 89f453ebb3 |
| 783304f248 |
| caafe96ce7 |
| 2a05825b38 |
| 2fee121df3 |
| c3424337c8 |
| b7a50fd772 |
| f4977d1518 |
| 96c1c299eb |
| ccff5fe155 |
| 289c003f87 |
| c4ab2008e4 |
| 19aa8a234c |
| bf2bd73f64 |
| 704a82bcf4 |
| a01322a5e5 |
| 30aec81951 |
| d8159c571d |
| c59a0f0951 |
| 999dd3dc25 |
| a243681ff8 |
| 73eb42467a |
| b8eec7a459 |
| a7d8e69820 |
| 3bd9dfc625 |
| dca2e58b8e |
| 267efb3a4b |
| c0d60196a1 |
| 2e1008e290 |
| 714a347692 |
| af4b32c6ba |
| c602d1256f |
| a592cbc417 |
| c557d187ab |
| e8427d0eb4 |
| c0f9761a61 |
| e1dc63bcc7 |
| 8a458e0dcb |
| 99246cd254 |
| 33e9a52385 |
| b833607504 |
| bae120929e |
| 30dd78dcee |
| 1d4dc57ad1 |
| 3f0c06b58d |
| bb4721049f |
| 78c9ae851c |
| 9660a7b469 |
| 81aa897c48 |
| f4c64c2c75 |
| ca5781827a |
| 31704047c5 |
| 1be0ab2bd1 |
| f8442fbb0d |
| f7b7f50b92 |
| 27a436b98a |
| 8f7dea10c1 |
| 68f43fe591 |
| a0cbfcfbca |
| 2a91eda30d |
| 06dc26a83e |
| add9d6e897 |
| d9b763a28b |
| 3ff38bab59 |
| ed1554dd19 |
| 931aac9c71 |
| a497541f39 |
| 333b5cfbb6 |
| eb2372137e |
| 7f2f6f2b16 |
| 5f0edb5f93 |
| f5a7f716b3 |
| 5163ac3ace |
| 4c272e6ea2 |
| c637b7f0e6 |
| 5462a599af |
| 7dcd7fa50f |
| 6271d51125 |
| 0bdbf51eb2 |
| 9aad51f328 |
| 699297711a |
| 1b976529bc |
| 877d9b18ee |
| b58ec3b458 |
| e94a6f129a |
| 1b22e102ce |
| a35e3f49b4 |
| 9313041d77 |
| 83c81e6b58 |
| eb356fe6ff |
| a62bb54a3a |
| 92017d0fdd |
| 476bee3424 |
| b630ecaaf7 |
| 359b38a34c |
| 2c525afc76 |
| e2826fa66c |
| 0f5a5a2235 |
| 1f6a736aab |
| 8cbbe5501b |
| dd63aac88c |
| 4cae47fc70 |
| eeafdb253a |
| e328b5ec53 |
.gitattributes (new file, 6 changes)

@@ -0,0 +1,6 @@
+# Always check-out / check-in files with LF line endings.
+* text=auto eol=lf
+
+go.sum merge=union
+**/zz_generated.*.go linguist-generated=true
+vendor/** linguist-vendored
.github/workflows/helm.yaml (4 changes)

@@ -35,7 +35,7 @@ jobs:
       - uses: actions/setup-go@v3
         with:
-          go-version: '1.20.3'
+          go-version: '1.22.2'
 
       - name: Set up chart-testing
         uses: helm/chart-testing-action@v2.2.1
@@ -47,7 +47,7 @@ jobs:
         run: |
           changed=$(ct list-changed --config=.github/ci/ct.yaml)
           if [[ -n "$changed" ]]; then
-            echo "::set-output name=changed::true"
+            echo "changed=true" >> $GITHUB_OUTPUT
           fi
 
       - name: Run chart-testing (lint)
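The second hunk tracks GitHub's deprecation of the `::set-output` workflow command in favor of appending `key=value` lines to the file named by `$GITHUB_OUTPUT`. As a rough sketch of the mechanism (the step ids and the consuming step below are hypothetical, not part of the workflow above):

```yaml
jobs:
  example:
    runs-on: ubuntu-latest
    steps:
      # A step publishes an output by appending key=value to the $GITHUB_OUTPUT file.
      - id: list-changed
        run: echo "changed=true" >> "$GITHUB_OUTPUT"
      # Later steps read it back through the writing step's id.
      - if: steps.list-changed.outputs.changed == 'true'
        run: echo "charts changed, running lint"
```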
.github/workflows/manifests.yaml (10 changes)

@@ -5,10 +5,10 @@ on:
 jobs:
   deploy:
     strategy:
       matrix:
-        k8s-version: ["v1.26.0"]
-        descheduler-version: ["v0.26.1"]
+        k8s-version: ["v1.30.0"]
+        descheduler-version: ["v0.29.0"]
         descheduler-api: ["v1alpha1", "v1alpha2"]
         manifest: ["deployment"]
     runs-on: ubuntu-latest
@@ -21,6 +21,10 @@ jobs:
           node_image: kindest/node:${{ matrix.k8s-version }}
           kubectl_version: ${{ matrix.k8s-version }}
           config: test/kind-config.yaml
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: go.mod
+          cache: true
       - name: Build image
         run: |
           VERSION="dev" make dev-image
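The `setup-go@v5` step added here reads the Go version from the module file rather than hard-coding it in CI. A minimal standalone sketch of that pattern (the checkout step is assumed, not part of the diff):

```yaml
steps:
  - uses: actions/checkout@v4
  - uses: actions/setup-go@v5
    with:
      go-version-file: go.mod  # follow the module's `go` directive; nothing to bump in CI
      cache: true              # cache Go build and module caches keyed on go.sum
```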
.github/workflows/release.yaml (2 changes)

@@ -22,7 +22,7 @@ jobs:
       - name: Install Helm
         uses: azure/setup-helm@v1
         with:
-          version: v3.4.0
+          version: v3.7.0
 
       - name: Run chart-releaser
         uses: helm/chart-releaser-action@v1.1.0
@@ -1,5 +1,5 @@
 run:
-  deadline: 2m
+  timeout: 2m
 
 linters:
   disable-all: true
@@ -15,4 +15,4 @@ linters-settings:
   gofumpt:
     extra-rules: true
   goimports:
-    local-prefixes: sigs.k8s.io/descheduler
+    local-prefixes: sigs.k8s.io/descheduler
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.20.3
+FROM golang:1.22.2
 
 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .
Makefile (9 changes)

@@ -26,7 +26,7 @@ ARCHS = amd64 arm arm64
 
 LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
 
-GOLANGCI_VERSION := v1.52.1
+GOLANGCI_VERSION := v1.58.1
 HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
 
 GOFUMPT_VERSION := v0.4.0
@@ -43,6 +43,10 @@ IMAGE:=descheduler:$(VERSION)
 # IMAGE_GCLOUD is the image name of descheduler in the remote registry
 IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
 
+# CURRENT_DIR is the current dir where the Makefile exists
+CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+
 # TODO: upload binaries to GCS bucket
 #
 # In the future binaries can be uploaded to
@@ -129,6 +133,9 @@ gen:
 	./hack/update-generated-defaulters.sh
 	./hack/update-docs.sh
 
+gen-docker:
+	$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:1.22.2 gen
+
 verify-gen:
 	./hack/verify-conversions.sh
 	./hack/verify-deep-copies.sh
OWNERS (1 change)

@@ -12,6 +12,7 @@ reviewers:
   - a7i
   - janeliul
   - knelasevero
+  - jklaw90
 emeritus_approvers:
   - aveshagarwal
   - k82cn
README.md (129 changes)

@@ -38,6 +38,10 @@ that version's release branch, as listed below:
 
 |Descheduler Version|Docs link|
 |---|---|
+|v0.30.x|[`release-1.30`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.30/README.md)|
+|v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|
+|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|
+|v0.27.x|[`release-1.27`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.27/README.md)|
 |v0.26.x|[`release-1.26`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.26/README.md)|
 |v0.25.x|[`release-1.25`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.25/README.md)|
 |v0.24.x|[`release-1.24`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.24/README.md)|
@@ -111,7 +115,7 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.
 
 ## Policy, Default Evictor and Strategy plugins
 
-**⚠️ v1alpha1 configuration is still suported, but deprecated (and soon will be removed). Please consider migrating to v1alpha2 (described bellow). For previous v1alpha1 documentation go to [docs/deprecated/v1alpha1.md](docs/deprecated/v1alpha1.md) ⚠️**
+**⚠️ v1alpha1 configuration is still supported, but deprecated (and soon will be removed). Please consider migrating to v1alpha2 (described bellow). For previous v1alpha1 documentation go to [docs/deprecated/v1alpha1.md](docs/deprecated/v1alpha1.md) ⚠️**
 
 The Descheduler Policy is configurable and includes default strategy plugins that can be enabled or disabled. It includes a common eviction configuration at the top level, as well as configuration from the Evictor plugin (Default Evictor, if not specified otherwise). Top-level configuration and Evictor plugin configuration are applied to all evictions.
 
@@ -139,6 +143,7 @@ The Default Evictor Plugin is used by default for filtering pods before processi
 |`labelSelector`|`metav1.LabelSelector`||(see [label filtering](#label-filtering))|
 |`priorityThreshold`|`priorityThreshold`||(see [priority filtering](#priority-filtering))|
 |`nodeFit`|`bool`|`false`|(see [node fit filtering](#node-fit-filtering))|
+|`minReplicas`|`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
 
 ### Example policy
 
@@ -163,6 +168,7 @@ profiles:
       evictFailedBarePods: true
       evictLocalStoragePods: true
       nodeFit: true
+      minReplicas: 2
     plugins:
       # DefaultEvictor is enabled for both `filter` and `preEvictionFilter`
       # filter:
@@ -200,8 +206,9 @@ Balance Plugins: These plugins process all pods, or groups of pods, and determin
 | [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity) |Deschedule|Evicts pods violating node affinity|
 | [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints) |Deschedule|Evicts pods violating node taints|
 | [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint) |Balance|Evicts pods violating TopologySpreadConstraints|
 | [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts) |Deschedule|Evicts pods having too many restarts|
 | [PodLifeTime](#podlifetime) |Deschedule|Evicts pods that have exceeded a specified age limit|
-| [RemoveFailedPods](#removefailedpods) |Deschedule|Evicts pods with certain failed reasons|
+| [RemoveFailedPods](#removefailedpods) |Deschedule|Evicts pods with certain failed reasons and exit codes|
 
 
 ### RemoveDuplicates
@@ -378,10 +385,9 @@ profiles:
         "memory": 20
         "pods": 20
-        namespaces:
-          exclude:
-          - "kube-system"
-          - "namespace1"
+        evictableNamespaces:
+          exclude:
+          - "kube-system"
+          - "namespace1"
     plugins:
       balance:
         enabled:
@@ -434,7 +440,10 @@ profiles:
 This strategy makes sure all pods violating
 [node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
 are eventually removed from nodes. Node affinity rules allow a pod to specify
-`requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
+`requiredDuringSchedulingIgnoredDuringExecution` and/or
+`preferredDuringSchedulingIgnoredDuringExecution`.
+
+The `requiredDuringSchedulingIgnoredDuringExecution` type tells the scheduler
 to respect node affinity when scheduling the pod but kubelet to ignore
 in case node changes over time and no longer respects the affinity.
 When enabled, the strategy serves as a temporary implementation
@@ -447,6 +456,14 @@ of scheduling. Over time nodeA stops to satisfy the rule. When the strategy gets
 executed and there is another node available that satisfies the node affinity rule,
 podA gets evicted from nodeA.
 
+The `preferredDuringSchedulingIgnoredDuringExecution` type tells the scheduler
+to respect node affinity when scheduling if that's possible. If not, the pod
+gets scheduled anyway. It may happen that, over time, the state of the cluster
+changes and now the pod can be scheduled on a node that actually fits its
+preferred node affinity. When enabled, the strategy serves as a temporary
+implementation of `preferredDuringSchedulingPreferredDuringExecution`, so the
+pod will be evicted if it can be scheduled on a "better" node.
+
 **Parameters:**
 
 |Name|Type|
@@ -486,18 +503,22 @@ key=value matches an excludedTaints entry, the taint will be ignored.
 For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
 excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".
 
+If a list of includedTaints is provided, a taint will be considered if and only if it matches an included key **or** key=value from the list. Otherwise it will be ignored. Leaving includedTaints unset will include any taint by default.
+
 **Parameters:**
 
 |Name|Type|
 |---|---|
 |`excludedTaints`|list(string)|
+|`includedTaints`|list(string)|
 |`includePreferNoSchedule`|bool|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
 |`labelSelector`|(see [label filtering](#label-filtering))|
 
 **Example:**
 
-````yaml
+Setting `excludedTaints`
+```yaml
 apiVersion: "descheduler/v1alpha2"
 kind: "DeschedulerPolicy"
 profiles:
@@ -512,7 +533,25 @@ profiles:
     deschedule:
       enabled:
         - "RemovePodsViolatingNodeTaints"
-````
+```
+
+Setting `includedTaints`
+```yaml
+apiVersion: "descheduler/v1alpha2"
+kind: "DeschedulerPolicy"
+profiles:
+  - name: ProfileName
+    pluginConfig:
+    - name: "RemovePodsViolatingNodeTaints"
+      args:
+        includedTaints:
+        - decommissioned=end-of-life # include only taints with key "decommissioned" and value "end-of-life"
+        - reserved # include all taints with key "reserved"
+    plugins:
+      deschedule:
+        enabled:
+          - "RemovePodsViolatingNodeTaints"
+```
 
 ### RemovePodsViolatingTopologySpreadConstraint
@@ -520,18 +559,41 @@ This strategy makes sure that pods violating [topology spread constraints](https
 are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
 This strategy requires k8s version 1.18 at a minimum.
 
-By default, this strategy only deals with hard constraints, setting parameter `includeSoftConstraints` to `true` will
-include soft constraints.
+By default, this strategy only includes hard constraints, you can explicitly set `constraints` as shown below to include both:
+```yaml
+constraints:
+  - DoNotSchedule
+  - ScheduleAnyway
+```
+
+The `topologyBalanceNodeFit` arg is used when balancing topology domains while the Default Evictor's `nodeFit` is used in pre-eviction to determine if a pod can be evicted.
+```yaml
+topologyBalanceNodeFit: false
+```
 
 Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.
 
+[Supported Constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/#spread-constraint-definition) fields:
+
+|Name|Supported?|
+|----|----------|
+|`maxSkew`|Yes|
+|`minDomains`|No|
+|`topologyKey`|Yes|
+|`whenUnsatisfiable`|Yes|
+|`labelSelector`|Yes|
+|`matchLabelKeys`|Yes|
+|`nodeAffinityPolicy`|Yes|
+|`nodeTaintsPolicy`|Yes|
+
 **Parameters:**
 
 |Name|Type|
 |---|---|
-|`includeSoftConstraints`|bool|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
 |`labelSelector`|(see [label filtering](#label-filtering))|
+|`constraints`|(see [whenUnsatisfiable](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core))||
+|`topologyBalanceNodeFit`|bool|default `true`. [node fit filtering](#node-fit-filtering) when balancing topology domains|
 
 **Example:**
@@ -543,7 +605,8 @@ profiles:
     pluginConfig:
     - name: "RemovePodsViolatingTopologySpreadConstraint"
       args:
-        includeSoftConstraints: false
+        constraints:
+          - DoNotSchedule
     plugins:
       balance:
         enabled:
@@ -559,6 +622,13 @@ include `podRestartThreshold`, which is the number of restarts (summed over all
 should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
 into that calculation.
 
+You can also specify `states` parameter to **only** evict pods matching the following conditions:
+- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`
+- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) of: `CrashLoopBackOff`
+
+If a value for `states` or `podStatusPhases` is not specified,
+Pods in any state (even `Running`) are considered for eviction.
+
 **Parameters:**
 
 |Name|Type|
@@ -567,6 +637,7 @@ into that calculation.
 |`includingInitContainers`|bool|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
 |`labelSelector`|(see [label filtering](#label-filtering))|
+|`states`|list(string)|Only supported in v0.28+|
 
 **Example:**
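The example itself falls outside the extracted hunk; a minimal policy sketch consistent with the parameters above (the profile name and threshold values are illustrative) could look like:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "RemovePodsHavingTooManyRestarts"
        args:
          podRestartThreshold: 100        # restarts summed over all containers
          includingInitContainers: true   # count init-container restarts too
          states:
            - "CrashLoopBackOff"          # only consider crash-looping pods
    plugins:
      deschedule:
        enabled:
          - "RemovePodsHavingTooManyRestarts"
```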
@@ -591,8 +662,9 @@ profiles:
 This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
 
 You can also specify `states` parameter to **only** evict pods matching the following conditions:
-- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
-- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`
+- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Unknown`
+- [Pod Reason](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions) reasons of: `NodeAffinity`, `NodeLost`, `Shutdown`, `UnexpectedAdmissionError`
+- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`, `CrashLoopBackOff`, `CreateContainerConfigError`, `ErrImagePull`, `ImagePullBackOff`, `CreateContainerError`, `InvalidImageName`
 
 If a value for `states` or `podStatusPhases` is not specified,
 Pods in any state (even `Running`) are considered for eviction.
@@ -627,10 +699,8 @@ profiles:
 ```
 
 ### RemoveFailedPods
 
 This strategy evicts pods that are in failed status phase.
-You can provide an optional parameter to filter by failed `reasons`.
-`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
+You can provide optional parameters to filter by failed pods' and containers' `reasons` and `exitCodes`. `exitCodes` apply to failed pods' containers with `terminated` state only. `reasons` and `exitCodes` can be expanded to include those of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
 You can specify an optional parameter `minPodLifetimeSeconds` to evict pods that are older than specified seconds.
 Lastly, you can specify the optional parameter `excludeOwnerKinds` and if a pod
 has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
@@ -642,6 +712,7 @@ has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considere
 |`minPodLifetimeSeconds`|uint|
 |`excludeOwnerKinds`|list(string)|
 |`reasons`|list(string)|
+|`exitCodes`|list(int32)|
 |`includingInitContainers`|bool|
 |`namespaces`|(see [namespace filtering](#namespace-filtering))|
 |`labelSelector`|(see [label filtering](#label-filtering))|
@@ -658,6 +729,8 @@ profiles:
       args:
         reasons:
         - "NodeAffinity"
+        exitCodes:
+        - 1
         includingInitContainers: true
         excludeOwnerKinds:
         - "Job"
@@ -672,7 +745,7 @@ profiles:
 
 ### Namespace filtering
 
-The following strategies accept a `namespaces` parameter which allows to specify a list of including, resp. excluding namespaces:
+The following strategies accept a `namespaces` parameter which allows to specify a list of including and excluding namespaces respectively:
 * `PodLifeTime`
 * `RemovePodsHavingTooManyRestarts`
 * `RemovePodsViolatingNodeTaints`
@@ -682,11 +755,10 @@ The following strategies accept a `namespaces` parameter which allows to specify
 * `RemovePodsViolatingTopologySpreadConstraint`
 * `RemoveFailedPods`
 
-
-The following strategies accept a `evictableNamespaces` parameter which allows to specify a list of excluding namespaces:
+The following strategies accept an `evictableNamespaces` parameter which allows to specify a list of excluding namespaces:
 * `LowNodeUtilization` and `HighNodeUtilization` (Only filtered right before eviction)
 
-For example with PodLifeTime:
+In the following example with `PodLifeTime`, `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
 
 ```yaml
 apiVersion: "descheduler/v1alpha2"
@@ -707,8 +779,7 @@ profiles:
           - "PodLifeTime"
 ```
 
-In the example `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
-The similar holds for `exclude` field:
+The similar holds for `exclude` field. The strategy gets executed over all namespaces but `namespace1` and `namespace2` in the following example.
 
 ```yaml
 apiVersion: "descheduler/v1alpha2"
@@ -729,9 +800,7 @@ profiles:
           - "PodLifeTime"
 ```
 
-The strategy gets executed over all namespaces but `namespace1` and `namespace2`.
-
-It's not allowed to compute `include` with `exclude` field.
+It's not allowed to combine `include` with `exclude` field.
 
 ### Priority filtering
@@ -834,6 +903,7 @@ profiles:
 - `nodeAffinity` on the pod
 - Resource `requests` made by the pod and the resources available on other nodes
 - Whether any of the other nodes are marked as `unschedulable`
+- Any `podAntiAffinity` between the pod and the pods on the other nodes
 
 E.g.
 
@@ -855,7 +925,7 @@ profiles:
           - "PodLifeTime"
 ```
 
-Note that node fit filtering references the current pod spec, and not that of it's owner.
+Note that node fit filtering references the current pod spec, and not that of its owner.
 Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
 the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
 This is expected behavior as the descheduler is a "best-effort" mechanism.
@@ -869,7 +939,7 @@ When the descheduler decides to evict pods from a node, it employs the following
 * [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
 * Pods (static or mirrored pods or standalone pods) not part of an ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are
 never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`)
-* Pods associated with DaemonSets are never evicted.
+* Pods associated with DaemonSets are never evicted (unless `evictDaemonSetPods: true` is set).
 * Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
 * Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
 * In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
@@ -927,6 +997,7 @@ packages that it is compiled with.
 
 | Descheduler | Supported Kubernetes Version |
 |-------------|------------------------------|
+| v0.27 | v1.27 |
 | v0.26 | v1.26 |
 | v0.25 | v1.25 |
 | v0.24 | v1.24 |
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: descheduler
-version: 0.26.0
-appVersion: 0.26.0
+version: 0.29.0
+appVersion: 0.29.0
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:
   - kubernetes
@@ -55,6 +55,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
 | `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
 | `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
+| `timeZone` | configure `timeZone` for CronJob | `nil` |
 | `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
 | `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
 | `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
@@ -75,6 +76,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `service.enabled` | If `true`, create a service for deployment | `false` |
 | `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
 | `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
+| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
 | `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
 | `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
 | `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
@@ -82,6 +84,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
 | `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
 | `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
+| `topologySpreadConstraints` | Topology Spread Constraints to spread the descheduler cronjob/deployment across the cluster | `[]` |
 | `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
 | `suspend` | Set spec.suspend in descheduler cronjob | `false` |
 | `commonLabels` | Labels to apply to all resources | `{}` |
@@ -1,3 +1,4 @@
+{{- if .Values.deschedulerPolicy }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
@@ -10,3 +11,4 @@ data:
   apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
   kind: "DeschedulerPolicy"
 {{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
+{{- end }}
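The new guard means the ConfigMap is rendered only when a policy is supplied inline; an empty or absent `deschedulerPolicy` renders nothing, so an externally managed ConfigMap can be used instead. A sketch of the two values modes (assuming Helm's `if` treats an empty map as falsy):

```yaml
# Inline policy: the chart renders the ConfigMap above.
deschedulerPolicy:
  profiles: []   # profiles go here

# Bring-your-own ConfigMap: leave the value empty so the guarded
# template emits nothing, and mount an existing ConfigMap instead.
# deschedulerPolicy: {}
```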
@@ -21,6 +21,9 @@ spec:
   {{- if .Values.failedJobsHistoryLimit }}
   failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
   {{- end }}
+  {{- if .Values.timeZone }}
+  timeZone: {{ .Values.timeZone }}
+  {{- end }}
   jobTemplate:
     spec:
       {{- if .Values.ttlSecondsAfterFinished }}
@@ -48,6 +51,14 @@ spec:
           affinity:
             {{- toYaml . | nindent 12 }}
           {{- end }}
+          {{- with .Values.topologySpreadConstraints }}
+          topologySpreadConstraints:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- if .Values.dnsConfig }}
+          dnsConfig:
+            {{- .Values.dnsConfig | toYaml | nindent 12 }}
+          {{- end }}
           {{- with .Values.tolerations }}
           tolerations:
             {{- toYaml . | nindent 12 }}
@@ -66,15 +77,11 @@ spec:
             image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
             imagePullPolicy: {{ .Values.image.pullPolicy }}
             command:
-              - "/bin/descheduler"
+              {{- toYaml .Values.command | nindent 16 }}
             args:
-              - "--policy-config-file"
-              - "/policy-dir/policy.yaml"
+              - --policy-config-file=/policy-dir/policy.yaml
               {{- range $key, $value := .Values.cmdOptions }}
-              - {{ printf "--%s" $key | quote }}
-              {{- if $value }}
-              - {{ $value | quote }}
-              {{- end }}
+              - {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
               {{- end }}
             livenessProbe:
               {{- toYaml .Values.livenessProbe | nindent 16 }}
@@ -85,6 +92,10 @@ spec:
             volumeMounts:
               - mountPath: /policy-dir
                 name: policy-volume
+          {{- if .Values.podSecurityContext }}
+          securityContext:
+            {{- toYaml .Values.podSecurityContext | nindent 12 }}
+          {{- end }}
           volumes:
             - name: policy-volume
               configMap:
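With the argument template switched to single `--key=value` tokens, each `cmdOptions` entry now renders as exactly one argument. A hypothetical values fragment illustrating the rendering:

```yaml
cmdOptions:
  v: 3         # renders as: --v=3
  dry-run: ""  # an empty value renders as a bare flag: --dry-run
```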
@@ -31,30 +31,29 @@ spec:
         {{- .Values.podAnnotations | toYaml | nindent 8 }}
       {{- end }}
     spec:
+      {{- if .Values.dnsConfig }}
+      dnsConfig:
+        {{- .Values.dnsConfig | toYaml | nindent 8 }}
+      {{- end }}
       {{- if .Values.priorityClassName }}
       priorityClassName: {{ .Values.priorityClassName }}
       {{- end }}
       serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
       {{- with .Values.imagePullSecrets }}
       imagePullSecrets:
-        {{- toYaml . | nindent 10 }}
+        {{- toYaml . | nindent 6 }}
       {{- end }}
       containers:
         - name: {{ .Chart.Name }}
           image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
           imagePullPolicy: {{ .Values.image.pullPolicy }}
           command:
-            - "/bin/descheduler"
+            {{- toYaml .Values.command | nindent 12 }}
           args:
-            - "--policy-config-file"
-            - "/policy-dir/policy.yaml"
-            - "--descheduling-interval"
-            - {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
+            - --policy-config-file=/policy-dir/policy.yaml
+            - --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
             {{- range $key, $value := .Values.cmdOptions }}
-            - {{ printf "--%s" $key | quote }}
-            {{- if $value }}
-            - {{ $value | quote }}
-            {{- end }}
+            - {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
             {{- end }}
             {{- include "descheduler.leaderElection" . | nindent 12 }}
           ports:
@@ -69,6 +68,10 @@ spec:
           volumeMounts:
            - mountPath: /policy-dir
               name: policy-volume
+      {{- if .Values.podSecurityContext }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      {{- end }}
       volumes:
         - name: policy-volume
           configMap:
@@ -81,6 +84,10 @@ spec:
       affinity:
         {{- toYaml . | nindent 8 }}
       {{- end }}
+      {{- with .Values.topologySpreadConstraints }}
+      topologySpreadConstraints:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.tolerations }}
       tolerations:
         {{- toYaml . | nindent 8 }}
@@ -9,6 +9,12 @@ metadata:
   namespace: {{ .Release.Namespace }}
 spec:
   clusterIP: None
+  {{- if .Values.service.ipFamilyPolicy }}
+  ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
+  {{- end }}
+  {{- if .Values.service.ipFamilies }}
+  ipFamilies: {{ toYaml .Values.service.ipFamilies | nindent 4 }}
+  {{- end }}
   ports:
     - name: http-metrics
       port: 10258
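A hypothetical values fragment exercising the new service fields (the values follow the dual-stack options documented in this diff's values.yaml changes below):

```yaml
service:
  enabled: true
  ipFamilyPolicy: PreferDualStack  # SingleStack | PreferDualStack | RequireDualStack
  ipFamilies:
    - IPv6
    - IPv4
```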
@@ -7,6 +7,9 @@ metadata:
   namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
   labels:
     {{- include "descheduler.labels" . | nindent 4 }}
+    {{- if .Values.serviceMonitor.additionalLabels }}
+    {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
+    {{- end }}
 spec:
   jobLabel: jobLabel
   namespaceSelector:
@@ -32,6 +32,10 @@ securityContext:
   runAsNonRoot: true
   runAsUser: 1000
 
+# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
+podSecurityContext: {}
+#   fsGroup: 1000
+
 nameOverride: ""
 fullnameOverride: ""
 
@@ -45,6 +49,7 @@ suspend: false
 # successfulJobsHistoryLimit: 3
 # failedJobsHistoryLimit: 1
 # ttlSecondsAfterFinished 600
+# timeZone: Etc/UTC
 
 # Required when running as a Deployment
 deschedulingInterval: 5m
@@ -67,52 +72,76 @@ leaderElection: {}
 #   resourceName: "descheduler"
 #   resourceNamescape: "kube-system"
 
 command:
   - "/bin/descheduler"
 
 cmdOptions:
   v: 3
 
 # Recommended to use the latest Policy API version supported by the Descheduler app version
-deschedulerPolicyAPIVersion: "descheduler/v1alpha1"
+deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
 
 # deschedulerPolicy contains the policies the descheduler will execute.
 # To use policies stored in an existing configMap use:
 # NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
 # deschedulerPolicy: {}
 deschedulerPolicy:
   # nodeSelector: "key1=value1,key2=value2"
   # maxNoOfPodsToEvictPerNode: 10
   # maxNoOfPodsToEvictPerNamespace: 10
-  # ignorePvcPods: true
-  # evictLocalStoragePods: true
-  strategies:
-    RemoveDuplicates:
-      enabled: true
-    RemovePodsHavingTooManyRestarts:
-      enabled: true
-      params:
-        podsHavingTooManyRestarts:
-          podRestartThreshold: 100
-          includingInitContainers: true
-    RemovePodsViolatingNodeTaints:
-      enabled: true
-    RemovePodsViolatingNodeAffinity:
-      enabled: true
-      params:
-        nodeAffinityType:
-          - requiredDuringSchedulingIgnoredDuringExecution
-    RemovePodsViolatingInterPodAntiAffinity:
-      enabled: true
-    RemovePodsViolatingTopologySpreadConstraint:
-      enabled: true
-      params:
-        includeSoftConstraints: false
-    LowNodeUtilization:
-      enabled: true
-      params:
-        nodeResourceUtilizationThresholds:
-          thresholds:
-            cpu: 20
-            memory: 20
-            pods: 20
-          targetThresholds:
-            cpu: 50
-            memory: 50
-            pods: 50
+  # evictDaemonSetPods: true
+  # tracing:
+  #   collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
+  #   transportCert: ""
+  #   serviceName: ""
+  #   serviceNamespace: ""
+  #   sampleRate: 1.0
+  #   fallbackToNoOpProviderOnError: true
+  profiles:
+    - name: default
+      pluginConfig:
+        - name: DefaultEvictor
+          args:
+            ignorePvcPods: true
+            evictLocalStoragePods: true
+        - name: RemoveDuplicates
+        - name: RemovePodsHavingTooManyRestarts
+          args:
+            podRestartThreshold: 100
+            includingInitContainers: true
+        - name: RemovePodsViolatingNodeTaints
+        - name: RemovePodsViolatingNodeAffinity
+          args:
+            nodeAffinityType:
+              - requiredDuringSchedulingIgnoredDuringExecution
+        - name: RemovePodsViolatingInterPodAntiAffinity
+        - name: RemovePodsViolatingTopologySpreadConstraint
+          args:
+            includeSoftConstraints: false
+        - name: LowNodeUtilization
+          args:
+            thresholds:
+              cpu: 20
+              memory: 20
+              pods: 20
+            targetThresholds:
+              cpu: 50
+              memory: 50
+              pods: 50
+      plugins:
+        balance:
+          enabled:
+            - RemoveDuplicates
+            - RemovePodsViolatingNodeAffinity
+            - RemovePodsViolatingTopologySpreadConstraint
+            - LowNodeUtilization
+        deschedule:
+          enabled:
+            - RemovePodsHavingTooManyRestarts
+            - RemovePodsViolatingNodeTaints
+            - RemovePodsViolatingNodeAffinity
+            - RemovePodsViolatingInterPodAntiAffinity
 
 priorityClassName: system-cluster-critical
@@ -138,6 +167,13 @@ affinity: {}
 #         values:
 #           - descheduler
 #     topologyKey: "kubernetes.io/hostname"
+topologySpreadConstraints: []
+# - maxSkew: 1
+#   topologyKey: kubernetes.io/hostname
+#   whenUnsatisfiable: DoNotSchedule
+#   labelSelector:
+#     matchLabels:
+#       app.kubernetes.io/name: descheduler
 tolerations: []
 # - key: 'management'
 #   operator: 'Equal'
@@ -161,6 +197,8 @@ podAnnotations: {}
 
 podLabels: {}
 
+dnsConfig: {}
+
 livenessProbe:
   failureThreshold: 3
   httpGet:
@@ -172,11 +210,24 @@ livenessProbe:
 
 service:
   enabled: false
+  # @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
+  #
+  ipFamilyPolicy: ""
+  # @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
+  # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
+  # E.g.
+  # ipFamilies:
+  #   - IPv6
+  #   - IPv4
+  ipFamilies: []
 
 serviceMonitor:
   enabled: false
   # The namespace where Prometheus expects to find service monitors.
   # namespace: ""
+  # Add custom labels to the ServiceMonitor resource
+  additionalLabels: {}
+  # prometheus: kube-prometheus-stack
   interval: ""
   # honorLabels: true
   insecureSkipVerify: true
@@ -29,6 +29,7 @@ import (
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
	"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
	deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
+	"sigs.k8s.io/descheduler/pkg/tracing"
 )
 
 const (
@@ -43,6 +44,7 @@ type DeschedulerServer struct {
	EventClient clientset.Interface
	SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
	DisableMetrics bool
+	EnableHTTP2 bool
 }
 
 // NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -74,7 +76,9 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
		},
	}
	deschedulerscheme.Scheme.Default(&versionedCfg)
-	cfg := componentconfig.DeschedulerConfiguration{}
+	cfg := componentconfig.DeschedulerConfiguration{
+		Tracing: componentconfig.TracingConfiguration{},
+	}
	if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
		return nil, err
	}
@@ -83,7 +87,6 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
 
 // AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
 func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
-	fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
	fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
	fs.StringVar(&rs.ClientConnection.Kubeconfig, "kubeconfig", rs.ClientConnection.Kubeconfig, "File with kube configuration. Deprecated, use client-connection-kubeconfig instead.")
	fs.StringVar(&rs.ClientConnection.Kubeconfig, "client-connection-kubeconfig", rs.ClientConnection.Kubeconfig, "File path to kube configuration for interacting with kubernetes apiserver.")
@@ -92,6 +95,13 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "Execute descheduler in dry run mode.")
	fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
+	fs.StringVar(&rs.Tracing.CollectorEndpoint, "otel-collector-endpoint", "", "Set this flag to the OpenTelemetry Collector Service Address")
+	fs.StringVar(&rs.Tracing.TransportCert, "otel-transport-ca-cert", "", "Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode")
+	fs.StringVar(&rs.Tracing.ServiceName, "otel-service-name", tracing.DefaultServiceName, "OTEL Trace name to be used with the resources")
+	fs.StringVar(&rs.Tracing.ServiceNamespace, "otel-trace-namespace", "", "OTEL Trace namespace to be used with the resources")
+	fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
+	fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
+	fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
 
	componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
@@ -20,7 +20,6 @@ package app
 import (
	"context"
	"io"
-	"os"
	"os/signal"
	"syscall"
 
@@ -28,14 +27,18 @@ import (
 
	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
	"sigs.k8s.io/descheduler/pkg/descheduler"
+	"sigs.k8s.io/descheduler/pkg/tracing"
 
	"github.com/spf13/cobra"
 
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/watch"
	apiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/apiserver/pkg/server/mux"
	restclient "k8s.io/client-go/rest"
-	registry "k8s.io/component-base/logs/api/v1"
-	jsonLog "k8s.io/component-base/logs/json"
+	"k8s.io/component-base/featuregate"
+	"k8s.io/component-base/logs"
+	logsapi "k8s.io/component-base/logs/api/v1"
+	_ "k8s.io/component-base/logs/json/register"
	"k8s.io/component-base/metrics/legacyregistry"
	"k8s.io/klog/v2"
@@ -48,36 +51,31 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
		klog.ErrorS(err, "unable to initialize server")
	}
 
+	featureGate := featuregate.NewFeatureGate()
+	logConfig := logsapi.NewLoggingConfiguration()
+
	cmd := &cobra.Command{
		Use: "descheduler",
		Short: "descheduler",
-		Long: `The descheduler evicts pods which may be bound to less desired nodes`,
-		Run: func(cmd *cobra.Command, args []string) {
-			// s.Logs.Config.Format = s.Logging.Format
-
-			// LoopbackClientConfig is a config for a privileged loopback connection
-			var LoopbackClientConfig *restclient.Config
-			var SecureServing *apiserver.SecureServingInfo
-			if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
+		Long: "The descheduler evicts pods which may be bound to less desired nodes",
+		PreRunE: func(cmd *cobra.Command, args []string) error {
+			logs.InitLogs()
+			if err := logsapi.ValidateAndApply(logConfig, featureGate); err != nil {
+				return err
+			}
+			descheduler.SetupPlugins()
+			return nil
+		},
+		RunE: func(cmd *cobra.Command, args []string) error {
+			// loopbackClientConfig is a config for a privileged loopback connection
+			var loopbackClientConfig *restclient.Config
+			var secureServing *apiserver.SecureServingInfo
+			if err := s.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
				klog.ErrorS(err, "failed to apply secure server configuration")
-				return
+				return err
			}
 
-			var factory registry.LogFormatFactory
-			if s.Logging.Format == "json" {
-				factory = jsonLog.Factory{}
-			}
-
-			if factory == nil {
-				klog.ClearLogger()
-			} else {
-				log, loggerControl := factory.Create(registry.LoggingConfiguration{
-					Format: s.Logging.Format,
-					Verbosity: s.Logging.Verbosity,
-				}, registry.LoggingOptions{})
-				defer loggerControl.Flush()
-				klog.SetLogger(log)
-			}
+			secureServing.DisableHTTP2 = !s.EnableHTTP2
 
			ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
 
@@ -88,33 +86,42 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
 
			healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
 
-			stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
+			stoppedCh, _, err := secureServing.Serve(pathRecorderMux, 0, ctx.Done())
			if err != nil {
-				klog.Fatalf("failed to start secure server: %v", err)
-				return
+				return err
			}
 
-			err = Run(ctx, s)
-			if err != nil {
+			if err = Run(ctx, s); err != nil {
				klog.ErrorS(err, "descheduler server")
+				return err
			}
 
			done()
			// wait for metrics server to close
			<-stoppedCh
+
+			return nil
		},
	}
	cmd.SetOut(out)
	flags := cmd.Flags()
	s.AddFlags(flags)
+
+	runtime.Must(logsapi.AddFeatureGates(featureGate))
+	logsapi.AddFlags(logConfig, flags)
+
	return cmd
 }
 
 func Run(ctx context.Context, rs *options.DeschedulerServer) error {
	err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
	if err != nil {
		return err
	}
	defer tracing.Shutdown(ctx)
+	// increase the fake watch channel so the dry-run mode can be run
+	// over a cluster with thousands of pods
+	watch.DefaultChanSize = 100000
	return descheduler.Run(ctx, rs)
 }
-
-func SetupLogs() {
-	klog.SetOutput(os.Stdout)
-	klog.InitFlags(nil)
-}
@@ -21,14 +21,8 @@ import (
 
	"k8s.io/component-base/cli"
	"sigs.k8s.io/descheduler/cmd/descheduler/app"
-	"sigs.k8s.io/descheduler/pkg/descheduler"
 )
 
-func init() {
-	app.SetupLogs()
-	descheduler.SetupPlugins()
-}
-
 func main() {
	out := os.Stdout
	cmd := app.NewDeschedulerCommand(out)
@@ -13,7 +13,7 @@ descheduler [flags]
|
||||
### Options
|
||||
|
||||
```
|
||||
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
|
||||
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces and IP address families will be used. (default 0.0.0.0)
|
||||
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
|
||||
--client-connection-burst int32 Burst to use for interacting with kubernetes apiserver.
|
||||
--client-connection-kubeconfig string File path to kube configuration for interacting with kubernetes apiserver.
|
||||
@@ -21,6 +21,7 @@ descheduler [flags]
|
||||
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
|
||||
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
|
||||
--dry-run Execute descheduler in dry run mode.
|
||||
--enable-http2 If http/2 should be enabled for the metrics and health check
|
||||
-h, --help help for descheduler
|
||||
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
|
||||
--kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
|
||||
@@ -31,18 +32,31 @@ descheduler [flags]
|
||||
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
|
||||
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
|
||||
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
|
||||
--logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
|
||||
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
|
||||
--log-json-info-buffer-size quantity   [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-json-split-stream   [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--log-text-info-buffer-size quantity   [Alpha] In text format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-text-split-stream   [Alpha] In text format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--logging-format string   Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text")
--otel-collector-endpoint string   Set this flag to the OpenTelemetry Collector Service Address
--otel-fallback-no-op-on-error   Fallback to NoOp Tracer in case of error
--otel-sample-rate float   Sample rate to collect the Traces (default 1)
--otel-service-name string   OTEL Trace name to be used with the resources (default "descheduler")
--otel-trace-namespace string   OTEL Trace namespace to be used with the resources
--otel-transport-ca-cert string   Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode
--permit-address-sharing   If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
--permit-port-sharing   If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
--policy-config-file string   File with descheduler policy configuration.
--secure-port int   The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
--tls-cert-file string   File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites strings   Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_RC4_128_SHA.
--tls-min-version string   Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string   File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey   A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
-v, --v Level   number for the log level verbosity
--vmodule pattern=N,...   comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)
```

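As a quick, hypothetical illustration (it is not part of the generated reference above), a few of these flags might be combined like so; the policy path and verbosity level are placeholder values to adapt to your environment:

```
# Hypothetical invocation; adjust the policy path and log settings for your setup.
descheduler --policy-config-file=/policy-dir/policy.yaml \
  --logging-format=json \
  --v=3
```
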
### SEE ALSO
@@ -109,17 +109,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku

Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.26.1' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.30.0' | kubectl apply -f -
```

Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.26.1' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.30.0' | kubectl apply -f -
```

Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.26.1' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.30.0' | kubectl apply -f -
```

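Whichever variant is chosen, the rendered manifests can be previewed before anything reaches the cluster by switching to kubectl's client-side dry run, for example:

```
# Preview only; nothing is created on the cluster.
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.30.0' | kubectl apply --dry-run=client -f -
```
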
## User Guide
@@ -135,6 +135,7 @@ The policy includes a common configuration that applies to all the strategies:

|------|---------------|-------------|
| `nodeSelector` | `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictDaemonSetPods` | `false` | allows eviction of pods associated to DaemonSet resources |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |

@@ -152,6 +153,7 @@ kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictDaemonSetPods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false

@@ -4,6 +4,12 @@ Starting with descheduler release v0.10.0 container images are available in the

Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
v0.27.1 | registry.k8s.io/descheduler/descheduler:v0.27.1 | AMD64<br>ARM64<br>ARMv7 |
v0.27.0 | registry.k8s.io/descheduler/descheduler:v0.27.0 | AMD64<br>ARM64<br>ARMv7 |
v0.26.1 | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
v0.26.0 | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |
v0.25.1 | registry.k8s.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |

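For a quick local check that a given tag resolves, any of the images above can be pulled directly, assuming Docker (or another OCI-compatible client) is available:

```
# Fetch the image for the release being compared here.
docker pull registry.k8s.io/descheduler/descheduler:v0.30.0
```
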
@@ -21,7 +21,9 @@ profiles:
        includingInitContainers: true
    - name: "RemovePodsViolatingTopologySpreadConstraint"
      args:
        includeSoftConstraints: true
        constraints:
        - DoNotSchedule
        - ScheduleAnyway
    plugins:
      deschedule:
        enabled:

examples/too-many-restarts.yml (new file)
@@ -0,0 +1,15 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "RemovePodsHavingTooManyRestarts"
      args:
        podRestartThreshold: 100
        includingInitContainers: true
        states:
        - CrashLoopBackOff
    plugins:
      deschedule:
        enabled:
        - "RemovePodsHavingTooManyRestarts"

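One way to try this new example is to load it into the ConfigMap the descheduler reads its policy from. This is only a sketch: the ConfigMap name, namespace, and key below are assumptions that must match how your descheduler manifests mount the policy file.

```
# Hypothetical names; align the ConfigMap name, namespace, and key with your manifests.
kubectl create configmap descheduler-policy-configmap \
  -n kube-system \
  --from-file=policy.yaml=examples/too-many-restarts.yml
```
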
@@ -5,7 +5,9 @@ profiles:
    pluginConfig:
    - name: "RemovePodsViolatingTopologySpreadConstraint"
      args:
        includeSoftConstraints: true # Include 'ScheduleAnyways' constraints
        constraints:
        - DoNotSchedule
        - ScheduleAnyway
    plugins:
      balance:
        enabled:

go.mod
@@ -1,114 +1,116 @@
module sigs.k8s.io/descheduler

go 1.20
go 1.22.2

require (
github.com/client9/misspell v0.3.4
github.com/google/go-cmp v0.5.9
github.com/spf13/cobra v1.6.0
github.com/google/go-cmp v0.6.0
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
k8s.io/api v0.27.0
k8s.io/apimachinery v0.27.0
k8s.io/apiserver v0.27.0
k8s.io/client-go v0.27.0
k8s.io/code-generator v0.27.0
k8s.io/component-base v0.27.0
k8s.io/component-helpers v0.27.0
k8s.io/klog/v2 v2.90.1
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
go.opentelemetry.io/otel v1.24.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0
go.opentelemetry.io/otel/sdk v1.24.0
go.opentelemetry.io/otel/trace v1.24.0
google.golang.org/grpc v1.62.0
k8s.io/api v0.30.0
k8s.io/apimachinery v0.30.0
k8s.io/apiserver v0.30.0
k8s.io/client-go v0.30.0
k8s.io/code-generator v0.30.0
k8s.io/component-base v0.30.0
k8s.io/component-helpers v0.30.0
k8s.io/klog/v2 v2.120.1
k8s.io/utils v0.0.0-20240310230437-4693a0247e57
sigs.k8s.io/mdtoc v1.1.0
)

require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.4.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
github.com/google/cel-go v0.12.6 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/google/cel-go v0.17.8 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
go.opentelemetry.io/otel/metric v0.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
go.opentelemetry.io/otel/trace v1.10.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
go.etcd.io/etcd/api/v3 v3.5.10 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
go.etcd.io/etcd/client/v3 v3.5.10 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/mod v0.15.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/term v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.18.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
k8s.io/kms v0.27.0 // indirect
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 // indirect
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
k8s.io/kms v0.30.0 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0

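After a dependency bump like the one above, the module graph can be re-resolved and sanity-checked with the standard Go tooling (a routine sketch, run from the repository root):

```
go mod tidy      # re-resolve go.mod and go.sum against the source tree
go mod verify    # check downloaded modules against the go.sum checksums
go build ./...   # confirm everything still compiles
```
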
812
go.sum
812
go.sum
@@ -1,764 +1,348 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
|
||||
github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
|
||||
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
|
||||
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
|
||||
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M=
|
||||
github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
|
||||
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
|
||||
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
|
||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
|
||||
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
|
||||
github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
|
||||
github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
|
||||
github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
|
||||
github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI=
github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU=
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.27.0 h1:2owttiA8Oa+J3idFeq8TSnNpm4y6AOGPI3PDbIpp2cE=
k8s.io/api v0.27.0/go.mod h1:Wl+QRvQlh+T8SK5f4F6YBhhyH6hrFO08nl74xZb1MUE=
k8s.io/apimachinery v0.27.0 h1:vEyy/PVMbPMCPutrssCVHCf0JNZ0Px+YqPi82K2ALlk=
k8s.io/apimachinery v0.27.0/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM=
k8s.io/apiserver v0.27.0 h1:sXt/2yVMebZef6GqJHs4IYHSdSYwwrJCafBV/KSCwDw=
k8s.io/apiserver v0.27.0/go.mod h1:8heEJ5f6EqiKwXC3Ez3ikgOvGtRSEQG/SQZkhO9UzIg=
k8s.io/client-go v0.27.0 h1:DyZS1fJkv73tEy7rWv4VF6NwGeJ7SKvNaLRXZBYLA+4=
k8s.io/client-go v0.27.0/go.mod h1:XVEmpNnM+4JYO3EENoFV/ZDv3KxKVJUnzGo70avk+C4=
k8s.io/code-generator v0.27.0 h1:XQsGgUKnxuaPNr5V+Bg/AUmbvfhVu1jfxTW8PpQyGs0=
k8s.io/code-generator v0.27.0/go.mod h1:iWtpm0ZMG6Gc4daWfITDSIu+WFhFJArYDhj242zcbnY=
k8s.io/component-base v0.27.0 h1:g3/FkscH8Uqg9SiDCEfhfhTVwKiVo4T2+iBwUqiFkMg=
k8s.io/component-base v0.27.0/go.mod h1:PXyBQd/vYYjqqGB83rnsHffTTG6zlmxZAd0ZSOu6evk=
k8s.io/component-helpers v0.27.0 h1:rymQGJc4s30hHeb5VGuPdht8gKIPecj+Bw2FOJSavE4=
k8s.io/component-helpers v0.27.0/go.mod h1:vMjVwym/Y0BVyNvg8a4Et2vyPJAh/JhBM0OTRAt0Ceg=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.27.0 h1:adCotKQybOjxwbxW7ogXyv8uQGan/3Y126S2aNW4YFY=
k8s.io/kms v0.27.0/go.mod h1:vI2R4Nhw+PZ+DYtVPVYKsIqip2IYjZWK9bESR64WdIw=
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg=
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY=
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY=
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 h1:MB1zkK+WMOmfLxEpjr1wEmkpcIhZC7kfTkZ0stg5bog=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1/go.mod h1:/4NLd21PQY0B+H+X0aDZdwUiVXYJQl/2NXA5KVtDiP4=
k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA=
k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE=
k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA=
k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M=
k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY=
k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ=
k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY=
k8s.io/code-generator v0.30.0 h1:3VUVqHvWFSVSm9kqL/G6kD4ZwNdHF6J/jPyo3Jgjy3k=
k8s.io/code-generator v0.30.0/go.mod h1:mBMZhfRR4IunJUh2+7LVmdcWwpouCH5+LNPkZ3t/v7Q=
k8s.io/component-base v0.30.0 h1:cj6bp38g0ainlfYtaOQuRELh5KSYjhKxM+io7AUIk4o=
k8s.io/component-base v0.30.0/go.mod h1:V9x/0ePFNaKeKYA3bOvIbrNoluTSG+fSJKjLdjOoeXQ=
k8s.io/component-helpers v0.30.0 h1:xbJtNCfSM4SB/Tz5JqCKDZv4eT5LVi/AWQ1VOxhmStU=
k8s.io/component-helpers v0.30.0/go.mod h1:68HlSwXIumMKmCx8cZe1PoafQEYh581/sEpxMrkhmX4=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.30.0 h1:ZlnD/ei5lpvUlPw6eLfVvH7d8i9qZ6HwUQgydNVks8g=
k8s.io/kms v0.30.0/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY=
k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
|
||||
hack/cherry_pick_pull.sh (new executable file, 257 lines)
@@ -0,0 +1,257 @@
#!/usr/bin/env bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Usage Instructions: https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md

# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
# meta.) Assumes you care about pulls from remote "upstream" and
# checks them out to a branch named:
#  automated-cherry-pick-of-<pr>-<target branch>-<timestamp>

set -o errexit
set -o nounset
set -o pipefail

REPO_ROOT="$(git rev-parse --show-toplevel)"
declare -r REPO_ROOT
cd "${REPO_ROOT}"

STARTINGBRANCH=$(git symbolic-ref --short HEAD)
declare -r STARTINGBRANCH
declare -r REBASEMAGIC="${REPO_ROOT}/.git/rebase-apply"
DRY_RUN=${DRY_RUN:-""}
REGENERATE_DOCS=${REGENERATE_DOCS:-""}
UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
FORK_REMOTE=${FORK_REMOTE:-origin}
MAIN_REPO_ORG=${MAIN_REPO_ORG:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $3}')}
MAIN_REPO_NAME=${MAIN_REPO_NAME:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $4}')}

if [[ -z ${GITHUB_USER:-} ]]; then
  echo "Please export GITHUB_USER=<your-user> (or GH organization, if that's where your fork lives)"
  exit 1
fi

if ! command -v gh > /dev/null; then
  echo "Can't find 'gh' tool in PATH, please install from https://github.com/cli/cli"
  exit 1
fi

if [[ "$#" -lt 2 ]]; then
  echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
  echo
  echo "  Checks out <remote branch> and handles the cherry-pick of <pr> (possibly multiple) for you."
  echo "  Examples:"
  echo "    $0 upstream/release-3.14 12345        # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR."
  echo "    $0 upstream/release-3.14 12345 56789  # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR."
  echo
  echo "  Set the DRY_RUN environment var to skip git push and creating PR."
  echo "  This is useful for creating patches to a release branch without making a PR."
  echo "  When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked."
  echo
  echo "  Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits."
  echo "  This is useful when picking commits containing changes to API documentation."
  echo
  echo "  Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)"
  echo "  to override the default remote names to what you have locally."
  echo
  echo "  For merge process info, see https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md"
  exit 2
fi

# Checks if you are logged in. Will error/bail if you are not.
gh auth status

if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then
  echo "!!! Dirty tree. Clean up and try again."
  exit 1
fi

if [[ -e "${REBASEMAGIC}" ]]; then
  echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again."
  exit 1
fi

declare -r BRANCH="$1"
shift 1
declare -r PULLS=( "$@" )

function join { local IFS="$1"; shift; echo "$*"; }
PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789"
declare -r PULLDASH
PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789"
declare -r PULLSUBJ

echo "+++ Updating remotes..."
git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}"

if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then
  echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21."
  echo "    (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)"
  exit 1
fi

NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools.
declare -r NEWBRANCHREQ
NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')"
declare -r NEWBRANCH
NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)"
declare -r NEWBRANCHUNIQ
echo "+++ Creating local branch ${NEWBRANCHUNIQ}"

cleanbranch=""
gitamcleanup=false
function return_to_kansas {
  if [[ "${gitamcleanup}" == "true" ]]; then
    echo
    echo "+++ Aborting in-progress git am."
    git am --abort >/dev/null 2>&1 || true
  fi

  # return to the starting branch and delete the PR text file
  if [[ -z "${DRY_RUN}" ]]; then
    echo
    echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up."
    git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true
    if [[ -n "${cleanbranch}" ]]; then
      git branch -D "${cleanbranch}" >/dev/null 2>&1 || true
    fi
  fi
}
trap return_to_kansas EXIT

SUBJECTS=()
function make-a-pr() {
  local rel
  rel="$(basename "${BRANCH}")"
  echo
  echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}"

  local numandtitle
  numandtitle=$(printf '%s\n' "${SUBJECTS[@]}")
  prtext=$(cat <<EOF
Cherry pick of ${PULLSUBJ} on ${rel}.

${numandtitle}

For details on the cherry pick process, see the [cherry pick requests](https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md) page.

\`\`\`release-note

\`\`\`
EOF
)

  gh pr create --title="Automated cherry pick of ${numandtitle}" --body="${prtext}" --head "${GITHUB_USER}:${NEWBRANCH}" --base "${rel}" --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}"
}

git checkout -b "${NEWBRANCHUNIQ}" "${BRANCH}"
cleanbranch="${NEWBRANCHUNIQ}"

gitamcleanup=true
for pull in "${PULLS[@]}"; do
  echo "+++ Downloading patch to /tmp/${pull}.patch (in case you need to do this again)"

  curl -o "/tmp/${pull}.patch" -sSL "https://github.com/${MAIN_REPO_ORG}/${MAIN_REPO_NAME}/pull/${pull}.patch"
  echo
  echo "+++ About to attempt cherry pick of PR. To reattempt:"
  echo "  $ git am -3 /tmp/${pull}.patch"
  echo
  git am -3 "/tmp/${pull}.patch" || {
    conflicts=false
    while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \
      || [[ -e "${REBASEMAGIC}" ]]; do
      conflicts=true # <-- We should have detected conflicts once
      echo
      echo "+++ Conflicts detected:"
      echo
      (git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?"
      echo
      echo "+++ Please resolve the conflicts in another window (and remember to 'git add / git am --continue')"
      read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
      echo
      if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
        echo "Aborting." >&2
        exit 1
      fi
    done

    if [[ "${conflicts}" != "true" ]]; then
      echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'"
      exit 1
    fi
  }

  # set the subject
  subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //')
  SUBJECTS+=("#${pull}: ${subject}")

  # remove the patch file from /tmp
  rm -f "/tmp/${pull}.patch"
done
gitamcleanup=false

# Re-generate docs (if needed)
if [[ -n "${REGENERATE_DOCS}" ]]; then
  echo
  echo "Regenerating docs..."
  if ! hack/generate-docs.sh; then
    echo
    echo "hack/generate-docs.sh FAILED to complete."
    exit 1
  fi
fi

if [[ -n "${DRY_RUN}" ]]; then
  echo "!!! Skipping git push and PR creation because you set DRY_RUN."
  echo "To return to the branch you were in when you invoked this script:"
  echo
  echo "  git checkout ${STARTINGBRANCH}"
  echo
  echo "To delete this branch:"
  echo
  echo "  git branch -D ${NEWBRANCHUNIQ}"
  exit 0
fi

if git remote -v | grep ^"${FORK_REMOTE}" | grep "${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"; then
  echo "!!! You have ${FORK_REMOTE} configured as your ${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"
  echo "This isn't normal. Leaving you with push instructions:"
  echo
  echo "+++ First manually push the branch this script created:"
  echo
  echo "  git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}"
  echo
  echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)."
  echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values."
  echo
  make-a-pr
  cleanbranch=""
  exit 0
fi

echo
echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):"
echo
echo "  git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}"
echo
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
  echo "Aborting." >&2
  exit 1
fi

git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}"
make-a-pr
@@ -20,6 +20,6 @@ function find_dirs_containing_comment_tags() {
     | LC_ALL=C sort -u \
   )

-  IFS=",";
+  IFS=" ";
   printf '%s' "${array[*]}";
 }
@@ -6,5 +6,5 @@ go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/con

 ${OS_OUTPUT_BINPATH}/conversion-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")" \
-  --output-file-base zz_generated.conversion
+  --output-file zz_generated.conversion.go \
+  $(find_dirs_containing_comment_tags "+k8s:conversion-gen=")
@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepc

 ${OS_OUTPUT_BINPATH}/deepcopy-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")" \
-  --output-file-base zz_generated.deepcopy
+  --output-file zz_generated.deepcopy.go \
+  $(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")
@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defa

 ${OS_OUTPUT_BINPATH}/defaulter-gen \
   --go-header-file "hack/boilerplate/boilerplate.go.txt" \
-  --input-dirs "$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")" \
-  --extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
-  --output-file-base zz_generated.defaults
+  --extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
+  --output-file zz_generated.defaults.go \
+  $(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

 GO_VERSION=($(go version))

-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21|go1.22') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

 GO_VERSION=($(go version))

-if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20') ]]; then
+if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21|go1.22') ]]; then
   echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
   exit 1
 fi
keps/753-descheduling-framework/README.md (new file, 1211 lines; diff suppressed because it is too large)
keps/753-descheduling-framework/framework-workflow-diagram.png (new binary file, 48 KiB; binary file not shown)
keps/753-descheduling-framework/kep.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
title: Descheduling framework
kep-number: 753
authors:
  - "@ingvagabund"
  - "@damemi"
owning-sig: sig-scheduling
status: provisional
creation-date: 2024-04-08
reviewers:
  - "@ingvagabund"
  - "@damemi"
  - "@a7i"
  - "@knelasevero"
approvers:
  - "@ingvagabund"
  - "@damemi"
  - "@a7i"
  - "@knelasevero"
#replaces:

# The target maturity stage in the current dev cycle for this KEP.
stage: alpha

# The most recent milestone for which work toward delivery of this KEP has been
# done. This can be the current (upcoming) milestone, if it is being actively
# worked on.
# latest-milestone: "v1.19"

# disable-supported: true
@@ -13,6 +13,7 @@ data:
       pluginConfig:
       - name: "DefaultEvictor"
+      - name: "RemovePodsViolatingInterPodAntiAffinity"
       - name: "RemoveDuplicates"
       - name: "LowNodeUtilization"
         args:
           thresholds:
@@ -30,4 +31,4 @@ data:
           - "RemoveDuplicates"
         deschedule:
           enabled:
-        - "RemovePodsViolatingInterPodAntiAffinity"
+          - "RemovePodsViolatingInterPodAntiAffinity"
@@ -16,7 +16,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: descheduler
-          image: registry.k8s.io/descheduler/descheduler:v0.26.1
+          image: registry.k8s.io/descheduler/descheduler:v0.29.0
           volumeMounts:
           - mountPath: /policy-dir
             name: policy-volume
@@ -19,7 +19,7 @@ spec:
       serviceAccountName: descheduler-sa
       containers:
         - name: descheduler
-          image: registry.k8s.io/descheduler/descheduler:v0.26.1
+          image: registry.k8s.io/descheduler/descheduler:v0.29.0
           imagePullPolicy: IfNotPresent
           command:
             - "/bin/descheduler"
@@ -14,7 +14,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: descheduler
-          image: registry.k8s.io/descheduler/descheduler:v0.26.1
+          image: registry.k8s.io/descheduler/descheduler:v0.29.0
           volumeMounts:
           - mountPath: /policy-dir
             name: policy-volume
@@ -36,7 +36,7 @@ var (
 			Name:           "pods_evicted",
 			Help:           "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
 			StabilityLevel: metrics.ALPHA,
-		}, []string{"result", "strategy", "namespace", "node"})
+		}, []string{"result", "strategy", "profile", "namespace", "node"})

 	buildInfo = metrics.NewGauge(
 		&metrics.GaugeOpts{
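The hunk above widens the eviction counter with a per-profile label. As a rough illustration (assuming the counter is exported as metrics.PodsEvicted, a name this compare view does not show), a caller would now record an eviction roughly like this:

package example

import (
	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/metrics"
)

// recordEviction is a hypothetical helper, not descheduler code: it shows the
// full label set a caller must now supply, including the name of the profile
// whose plugin evicted the pod.
func recordEviction(pod *v1.Pod, strategy, profile string) {
	metrics.PodsEvicted.With(map[string]string{
		"result":    "success",
		"strategy":  strategy,
		"profile":   profile,
		"namespace": pod.Namespace,
		"node":      pod.Spec.NodeName,
	}).Inc()
}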
@@ -103,6 +103,8 @@ func (hi *handleImpl) Evictor() frameworktypes.Evictor {
 }

 func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
+	klog.V(1).Info("Warning: v1alpha1 API is deprecated and will be removed in a future release. Use v1alpha2 API instead.")
+
 	err := V1alpha1ToInternal(in, pluginregistry.PluginRegistry, out, s)
 	if err != nil {
 		return err
@@ -137,6 +139,14 @@ func V1alpha1ToInternal(
 		}
 	}

+	evictDaemonSetPods := false
+	if deschedulerPolicy.EvictDaemonSetPods != nil {
+		evictDaemonSetPods = *deschedulerPolicy.EvictDaemonSetPods
+		if evictDaemonSetPods {
+			klog.V(1).Info("Warning: EvictDaemonSetPods is set to True. This could cause eviction of Kubernetes DaemonSet pods.")
+		}
+	}
+
 	ignorePvcPods := false
 	if deschedulerPolicy.IgnorePVCPods != nil {
 		ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
@@ -191,6 +201,7 @@ func V1alpha1ToInternal(
 			Name: defaultevictor.PluginName,
 			Args: &defaultevictor.DefaultEvictorArgs{
 				EvictLocalStoragePods:   evictLocalStoragePods,
+				EvictDaemonSetPods:      evictDaemonSetPods,
 				EvictSystemCriticalPods: evictSystemCriticalPods,
 				IgnorePvcPods:           ignorePvcPods,
 				EvictFailedBarePods:     evictBarePods,
@@ -19,7 +19,9 @@ package v1alpha1

 import (
 	"fmt"

+	v1 "k8s.io/api/core/v1"
 	"k8s.io/klog/v2"
+	utilpointer "k8s.io/utils/pointer"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
@@ -170,10 +172,15 @@ var StrategyParamsToPluginArgs = map[string]func(params *StrategyParameters) (*a
 		}, nil
 	},
 	"RemovePodsViolatingTopologySpreadConstraint": func(params *StrategyParameters) (*api.PluginConfig, error) {
+		constraints := []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule}
+		if params.IncludeSoftConstraints {
+			constraints = append(constraints, v1.ScheduleAnyway)
+		}
 		args := &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
 			Namespaces:             v1alpha1NamespacesToInternal(params.Namespaces),
 			LabelSelector:          params.LabelSelector,
-			IncludeSoftConstraints: params.IncludeSoftConstraints,
+			Constraints:            constraints,
+			TopologyBalanceNodeFit: utilpointer.Bool(true),
 		}
 		if err := removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
 			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
@@ -21,6 +21,7 @@ import (
 	"testing"

 	"github.com/google/go-cmp/cmp"
+	v1 "k8s.io/api/core/v1"
 	utilpointer "k8s.io/utils/pointer"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
@@ -567,13 +568,28 @@ func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t
 			result: &api.PluginConfig{
 				Name: removepodsviolatingtopologyspreadconstraint.PluginName,
 				Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
-					IncludeSoftConstraints: true,
+					Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
+					TopologyBalanceNodeFit: utilpointer.Bool(true),
 					Namespaces: &api.Namespaces{
 						Exclude: []string{"test1"},
 					},
 				},
 			},
 		},
+		{
+			description: "params without soft constraints",
+			params: &StrategyParameters{
+				IncludeSoftConstraints: false,
+			},
+			err: nil,
+			result: &api.PluginConfig{
+				Name: removepodsviolatingtopologyspreadconstraint.PluginName,
+				Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
+					Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
+					TopologyBalanceNodeFit: utilpointer.Bool(true),
+				},
+			},
+		},
 		{
 			description: "invalid params namespaces",
 			params: &StrategyParameters{
@@ -41,6 +41,9 @@ type DeschedulerPolicy struct {
 	// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
 	EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`

+	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
+	EvictDaemonSetPods *bool `json:"evictDaemonSetPods,omitempty"`
+
 	// IgnorePVCPods prevents pods with PVCs from being evicted.
 	IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`

@@ -90,6 +93,7 @@ type StrategyParameters struct {
 	NodeFit                 bool     `json:"nodeFit"`
 	IncludePreferNoSchedule bool     `json:"includePreferNoSchedule"`
 	ExcludedTaints          []string `json:"excludedTaints,omitempty"`
+	IncludedTaints          []string `json:"includedTaints,omitempty"`
 }

 type (
pkg/api/v1alpha1/zz_generated.deepcopy.go (generated, 12 lines changed)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -57,6 +57,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
 		*out = new(bool)
 		**out = **in
 	}
+	if in.EvictDaemonSetPods != nil {
+		in, out := &in.EvictDaemonSetPods, &out.EvictDaemonSetPods
+		*out = new(bool)
+		**out = **in
+	}
 	if in.IgnorePVCPods != nil {
 		in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
 		*out = new(bool)
@@ -366,6 +371,11 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.IncludedTaints != nil {
+		in, out := &in.IncludedTaints, &out.IncludedTaints
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
pkg/api/v1alpha1/zz_generated.defaults.go (generated, 2 lines changed)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -88,7 +88,7 @@ func convertToInternalPluginConfigArgs(out *api.DeschedulerPolicy) error {
 func Convert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
 	out.Name = in.Name
 	if _, ok := pluginregistry.PluginRegistry[in.Name]; ok {
-		out.Args = pluginregistry.PluginRegistry[in.Name].PluginArgInstance
+		out.Args = pluginregistry.PluginRegistry[in.Name].PluginArgInstance.DeepCopyObject()
 		if in.Args.Raw != nil {
 			_, _, err := Codecs.UniversalDecoder().Decode(in.Args.Raw, nil, out.Args)
 			if err != nil {
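The one-line DeepCopyObject change above guards against aliasing: previously every decoded PluginConfig appears to have shared the single PluginArgInstance held in the plugin registry, so decoding the args of one profile could overwrite args another profile had already decoded into the same object. A minimal sketch of the difference (the type and helper here are illustrative, not descheduler code):

package example

import "k8s.io/apimachinery/pkg/runtime"

// registryEntry stands in for a pluginregistry.PluginRegistry entry; the
// PluginArgInstance field holds one shared prototype object.
type registryEntry struct {
	PluginArgInstance runtime.Object
}

// argsFor sketches both variants. With share=true (the old behavior) every
// caller receives the same pointer, so the next Decode into it clobbers the
// args the previous profile decoded. DeepCopyObject isolates each caller.
func argsFor(entry registryEntry, share bool) runtime.Object {
	if share {
		return entry.PluginArgInstance // aliased: all profiles mutate one object
	}
	return entry.PluginArgInstance.DeepCopyObject() // fresh copy per profile
}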
pkg/api/v1alpha2/zz_generated.conversion.go (generated, 2 lines changed)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
pkg/api/v1alpha2/zz_generated.deepcopy.go (generated, 2 lines changed)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
pkg/api/v1alpha2/zz_generated.defaults.go (generated, 2 lines changed)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
pkg/api/zz_generated.deepcopy.go (generated, 2 lines changed)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -21,7 +21,6 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	componentbaseconfig "k8s.io/component-base/config"
 	registry "k8s.io/component-base/logs/api/v1"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -52,17 +51,41 @@ type DeschedulerConfiguration struct {
 	// EvictLocalStoragePods allows pods using local storage to be evicted.
 	EvictLocalStoragePods bool

+	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
+	EvictDaemonSetPods bool
+
 	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
 	IgnorePVCPods bool

+	// Tracing specifies the options for tracing.
+	Tracing TracingConfiguration
+
 	// LeaderElection starts Deployment using leader election loop
 	LeaderElection componentbaseconfig.LeaderElectionConfiguration

 	// Logging specifies the options of logging.
 	// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
 	Logging registry.LoggingConfiguration

 	// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
 	// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
 	ClientConnection componentbaseconfig.ClientConnectionConfiguration
 }

+type TracingConfiguration struct {
+	// CollectorEndpoint is the address of the OpenTelemetry collector.
+	// If not specified, tracing falls back to a NoopTraceProvider.
+	CollectorEndpoint string
+	// TransportCert is the path to the certificate file for the OpenTelemetry collector.
+	// If not specified, the provider starts in insecure mode.
+	TransportCert string
+	// ServiceName is the name of the service to be used in the OpenTelemetry collector.
+	// If not specified, the default value is "descheduler".
+	ServiceName string
+	// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
+	// If not specified, the default namespace is used.
+	ServiceNamespace string
+	// SampleRate is used to configure the sample rate of the OTEL trace collection. This value will
+	// be used as the Base value with sample ratio. A value >= 1.0 will sample everything and < 0 will
+	// not sample anything. Everything else is a percentage value.
+	SampleRate float64
+	// FallbackToNoOpProviderOnError can be set if you want the trace provider to fall back to the
+	// no-op provider when the configured endpoint-based provider cannot be set up.
+	FallbackToNoOpProviderOnError bool
+}
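The SampleRate semantics documented above (>= 1.0 samples everything, < 0 samples nothing, anything in between is a ratio) map directly onto OpenTelemetry's stock samplers. A small sketch of that mapping, written independently of the descheduler's actual tracing package:

package example

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// samplerFor translates the documented SampleRate semantics into an OTel
// sampler: >= 1.0 samples every span, < 0 samples none, and anything in
// between is treated as a trace-ID ratio.
func samplerFor(sampleRate float64) sdktrace.Sampler {
	switch {
	case sampleRate >= 1.0:
		return sdktrace.AlwaysSample()
	case sampleRate < 0:
		return sdktrace.NeverSample()
	default:
		return sdktrace.TraceIDRatioBased(sampleRate)
	}
}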
@@ -21,7 +21,6 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	componentbaseconfig "k8s.io/component-base/config"
 	registry "k8s.io/component-base/logs/api/v1"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -52,17 +51,41 @@ type DeschedulerConfiguration struct {
 	// EvictLocalStoragePods allows pods using local storage to be evicted.
 	EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`

+	// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
+	EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
+
 	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
 	IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`

+	// Tracing is used to set up the required OTEL tracing configuration.
+	Tracing TracingConfiguration `json:"tracing,omitempty"`
+
 	// LeaderElection starts Deployment using leader election loop
 	LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`

 	// Logging specifies the options of logging.
 	// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
 	Logging registry.LoggingConfiguration `json:"logging,omitempty"`

 	// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
 	// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
 	ClientConnection componentbaseconfig.ClientConnectionConfiguration `json:"clientConnection,omitempty"`
 }

+type TracingConfiguration struct {
+	// CollectorEndpoint is the address of the OpenTelemetry collector.
+	// If not specified, tracing falls back to a NoopTraceProvider.
+	CollectorEndpoint string `json:"collectorEndpoint"`
+	// TransportCert is the path to the certificate file for the OpenTelemetry collector.
+	// If not specified, the provider starts in insecure mode.
+	TransportCert string `json:"transportCert,omitempty"`
+	// ServiceName is the name of the service to be used in the OpenTelemetry collector.
+	// If not specified, the default value is "descheduler".
+	ServiceName string `json:"serviceName,omitempty"`
+	// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
+	// If not specified, the default namespace is used.
+	ServiceNamespace string `json:"serviceNamespace,omitempty"`
+	// SampleRate is used to configure the sample rate of the OTEL trace collection. This value will
+	// be used as the Base value with sample ratio. A value >= 1.0 will sample everything and < 0 will
+	// not sample anything. Everything else is a percentage value.
+	SampleRate float64 `json:"sampleRate"`
+	// FallbackToNoOpProviderOnError can be set if you want the trace provider to fall back to the
+	// no-op provider when the configured endpoint-based provider cannot be set up.
+	FallbackToNoOpProviderOnError bool `json:"fallbackToNoOpProviderOnError"`
+}
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -46,6 +46,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*componentconfig.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(a.(*TracingConfiguration), b.(*componentconfig.TracingConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*componentconfig.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(a.(*componentconfig.TracingConfiguration), b.(*TracingConfiguration), scope)
+	}); err != nil {
+		return err
+	}
 	return nil
 }
@@ -57,9 +67,12 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	out.EvictLocalStoragePods = in.EvictLocalStoragePods
+	out.EvictDaemonSetPods = in.EvictDaemonSetPods
 	out.IgnorePVCPods = in.IgnorePVCPods
+	if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
+		return err
+	}
 	out.LeaderElection = in.LeaderElection
 	out.Logging = in.Logging
 	out.ClientConnection = in.ClientConnection
 	return nil
 }
@@ -77,9 +90,12 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	out.EvictLocalStoragePods = in.EvictLocalStoragePods
+	out.EvictDaemonSetPods = in.EvictDaemonSetPods
 	out.IgnorePVCPods = in.IgnorePVCPods
+	if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
+		return err
+	}
 	out.LeaderElection = in.LeaderElection
 	out.Logging = in.Logging
 	out.ClientConnection = in.ClientConnection
 	return nil
 }
@@ -88,3 +104,33 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
 func Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
 	return autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in, out, s)
 }

+func autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
+	out.CollectorEndpoint = in.CollectorEndpoint
+	out.TransportCert = in.TransportCert
+	out.ServiceName = in.ServiceName
+	out.ServiceNamespace = in.ServiceNamespace
+	out.SampleRate = in.SampleRate
+	out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
+	return nil
+}
+
+// Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in, out, s)
+}
+
+func autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	out.CollectorEndpoint = in.CollectorEndpoint
+	out.TransportCert = in.TransportCert
+	out.ServiceName = in.ServiceName
+	out.ServiceNamespace = in.ServiceNamespace
+	out.SampleRate = in.SampleRate
+	out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
+	return nil
+}
+
+// Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration is an autogenerated conversion function.
+func Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in, out, s)
+}
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -29,8 +29,8 @@ import (
 func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
+	out.Tracing = in.Tracing
 	out.LeaderElection = in.LeaderElection
 	in.Logging.DeepCopyInto(&out.Logging)
 	out.ClientConnection = in.ClientConnection
 	return
 }
@@ -52,3 +52,19 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
 	}
 	return nil
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
+func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
pkg/apis/componentconfig/zz_generated.deepcopy.go (generated, 20 lines changed)
@@ -2,7 +2,7 @@
 // +build !ignore_autogenerated

 /*
-Copyright 2023 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -29,8 +29,8 @@ import (
 func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
+	out.Tracing = in.Tracing
 	out.LeaderElection = in.LeaderElection
 	in.Logging.DeepCopyInto(&out.Logging)
 	out.ClientConnection = in.ClientConnection
 	return
 }
@@ -52,3 +52,19 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
 	}
 	return nil
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
+func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
@@ -18,42 +18,209 @@ package descheduler

import (
	"context"
	"errors"
	"fmt"
	"math"
	"strconv"
	"strings"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	listersv1 "k8s.io/client-go/listers/core/v1"
	schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/events"
	componentbaseconfig "k8s.io/component-base/config"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
	"sigs.k8s.io/descheduler/metrics"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/client"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
	frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
	"sigs.k8s.io/descheduler/pkg/tracing"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/pkg/version"
)

type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status

type profileRunner struct {
	name                      string
	descheduleEPs, balanceEPs eprunner
}

type descheduler struct {
	rs                         *options.DeschedulerServer
	podLister                  listersv1.PodLister
	nodeLister                 listersv1.NodeLister
	namespaceLister            listersv1.NamespaceLister
	priorityClassLister        schedulingv1.PriorityClassLister
	getPodsAssignedToNode      podutil.GetPodsAssignedToNodeFunc
	sharedInformerFactory      informers.SharedInformerFactory
	evictionPolicyGroupVersion string
	deschedulerPolicy          *api.DeschedulerPolicy
	eventRecorder              events.EventRecorder
}

func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
	podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
	podLister := sharedInformerFactory.Core().V1().Pods().Lister()
	nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
	namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
	priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()

	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
	if err != nil {
		return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
	}

	return &descheduler{
		rs:                         rs,
		podLister:                  podLister,
		nodeLister:                 nodeLister,
		namespaceLister:            namespaceLister,
		priorityClassLister:        priorityClassLister,
		getPodsAssignedToNode:      getPodsAssignedToNode,
		sharedInformerFactory:      sharedInformerFactory,
		evictionPolicyGroupVersion: evictionPolicyGroupVersion,
		deschedulerPolicy:          deschedulerPolicy,
		eventRecorder:              eventRecorder,
	}, nil
}

func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
	var span trace.Span
	ctx, span = tracing.Tracer().Start(ctx, "runDeschedulerLoop")
	defer span.End()
	defer func(loopStartDuration time.Time) {
		metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
	}(time.Now())

	// error out if the cluster has at most one ready node
	if len(nodes) <= 1 {
		klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
		return fmt.Errorf("the cluster size is 0 or 1")
	}

	var client clientset.Interface
	// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake
	// client, so that evicting pods while running multiple strategies in a row has the same
	// cumulative effect as evicting pods for real.
	if d.rs.DryRun {
		klog.V(3).Infof("Building a cached client from the cluster for the dry run")
		// Create a new cache so we start from scratch without any leftovers
		fakeClient, err := cachedClient(d.rs.Client, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
		if err != nil {
			return err
		}

		// create a new instance of the shared informer factory from the cached client
		fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
		// register the pod informer, otherwise it will not get started
		d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
		if err != nil {
			return fmt.Errorf("build get pods assigned to node function error: %v", err)
		}

		fakeCtx, cncl := context.WithCancel(context.TODO())
		defer cncl()
		fakeSharedInformerFactory.Start(fakeCtx.Done())
		fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())

		client = fakeClient
		d.sharedInformerFactory = fakeSharedInformerFactory
	} else {
		client = d.rs.Client
	}

	klog.V(3).Infof("Building a pod evictor")
	podEvictor := evictions.NewPodEvictor(
		client,
		d.evictionPolicyGroupVersion,
		d.rs.DryRun,
		d.deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
		d.deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
		nodes,
		!d.rs.DisableMetrics,
		d.eventRecorder,
	)

	d.runProfiles(ctx, client, nodes, podEvictor)

	klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())

	return nil
}
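The dry-run branch above builds on client-go's fake clientset: a snapshot of the live objects is loaded into an in-memory client so that successive strategies observe each other's simulated evictions. A self-contained sketch of the same pattern (the helper names are ours, not the descheduler's):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

// newDryRunFactory seeds a fake clientset with a snapshot of objects and
// returns a synced informer factory on top of it. A delete issued through
// the fake client is immediately visible to subsequent listers, which is
// what makes consecutive dry-run strategies cumulative.
func newDryRunFactory(ctx context.Context, snapshot []runtime.Object) (*fake.Clientset, informers.SharedInformerFactory) {
	client := fake.NewSimpleClientset(snapshot...)
	factory := informers.NewSharedInformerFactory(client, 0)
	// Instantiate the pod informer before Start so it actually runs.
	factory.Core().V1().Pods().Informer()
	factory.Start(ctx.Done())
	factory.WaitForCacheSync(ctx.Done())
	return client, factory
}

// A dry-run "eviction" then reduces to a plain delete against the fake client.
func dryRunEvict(ctx context.Context, client *fake.Clientset, pod *v1.Pod) error {
	return client.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
}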
// runProfiles runs all the deschedule plugins of all profiles and
// later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
// see https://github.com/kubernetes-sigs/descheduler/issues/979
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	var span trace.Span
	ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
	defer span.End()
	var profileRunners []profileRunner
	for _, profile := range d.deschedulerPolicy.Profiles {
		currProfile, err := frameworkprofile.NewProfile(
			profile,
			pluginregistry.PluginRegistry,
			frameworkprofile.WithClientSet(client),
			frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
			frameworkprofile.WithPodEvictor(podEvictor),
			frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
		)
		if err != nil {
			klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
			continue
		}
		profileRunners = append(profileRunners, profileRunner{profile.Name, currProfile.RunDeschedulePlugins, currProfile.RunBalancePlugins})
	}

	for _, profileR := range profileRunners {
		// First deschedule
		status := profileR.descheduleEPs(ctx, nodes)
		if status != nil && status.Err != nil {
			span.AddEvent("failed to perform deschedule operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.DescheduleOperation)))
			klog.ErrorS(status.Err, "running deschedule extension point failed with error", "profile", profileR.name)
			continue
		}
	}

	for _, profileR := range profileRunners {
		// Balance later
		status := profileR.balanceEPs(ctx, nodes)
		if status != nil && status.Err != nil {
			span.AddEvent("failed to perform balance operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.BalanceOperation)))
			klog.ErrorS(status.Err, "running balance extension point failed with error", "profile", profileR.name)
			continue
		}
	}
}
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
	var span trace.Span
	ctx, span = tracing.Tracer().Start(ctx, "Run")
	defer span.End()
	metrics.Register()

	clientConnection := rs.ClientConnection

@@ -76,7 +243,9 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 	}

 	// Add k8s compatibility warnings to logs
-	versionCompatibilityCheck(rs)
+	if err := validateVersionCompatibility(rs.Client.Discovery(), version.Get()); err != nil {
+		klog.Warning(err.Error())
+	}

 	evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
 	if err != nil || len(evictionPolicyGroupVersion) == 0 {
@@ -88,15 +257,17 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 	}

 	if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
+		span.AddEvent("Validation Failure", trace.WithAttributes(attribute.String("err", "leaderElection must be used with deschedulingInterval")))
 		return fmt.Errorf("leaderElection must be used with deschedulingInterval")
 	}

 	if rs.LeaderElection.LeaderElect && rs.DryRun {
-		klog.V(1).InfoS("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
+		klog.V(1).Info("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
 	}

 	if rs.LeaderElection.LeaderElect && !rs.DryRun {
 		if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
+			span.AddEvent("Leader Election Failure", trace.WithAttributes(attribute.String("err", err.Error())))
 			return fmt.Errorf("leaderElection: %w", err)
 		}
 		return nil
@@ -105,27 +276,34 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
 	return runFn()
 }

-func versionCompatibilityCheck(rs *options.DeschedulerServer) {
-	serverVersion, serverErr := rs.Client.Discovery().ServerVersion()
-	if serverErr != nil {
-		klog.V(1).InfoS("Warning: Get Kubernetes server version fail")
-		return
-	}
-
-	deschedulerMinorVersion := strings.Split(version.Get().Minor, ".")[0]
-	deschedulerMinorVersionFloat, err := strconv.ParseFloat(deschedulerMinorVersion, 64)
+func validateVersionCompatibility(discovery discovery.DiscoveryInterface, versionInfo version.Info) error {
+	serverVersionInfo, err := discovery.ServerVersion()
 	if err != nil {
-		klog.Warning("Warning: Convert Descheduler minor version to float fail")
+		return errors.New("failed to discover Kubernetes server version")
 	}

-	kubernetesMinorVersionFloat, err := strconv.ParseFloat(serverVersion.Minor, 64)
+	serverVersion, err := utilversion.ParseSemantic(serverVersionInfo.String())
 	if err != nil {
-		klog.Warning("Warning: Convert Kubernetes server minor version to float fail")
+		return errors.New("failed to parse Kubernetes server version")
 	}

-	if math.Abs(deschedulerMinorVersionFloat-kubernetesMinorVersionFloat) > 3 {
-		klog.Warningf("Warning: Descheduler minor version %v is not supported on your version of Kubernetes %v.%v. See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix", deschedulerMinorVersion, serverVersion.Major, serverVersion.Minor)
+	deschedulerVersion, err := utilversion.ParseGeneric(versionInfo.GitVersion)
+	if err != nil {
+		return errors.New("failed to convert Descheduler minor version to float")
+	}
+
+	deschedulerMinor := float64(deschedulerVersion.Minor())
+	serverMinor := float64(serverVersion.Minor())
+	if math.Abs(deschedulerMinor-serverMinor) > 3 {
+		return fmt.Errorf(
+			"descheduler version %v may not be supported on your version of Kubernetes %v."+
+				"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
+			deschedulerVersion.String(),
+			serverVersionInfo.String(),
+		)
 	}
+
+	return nil
 }
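The rewritten check boils down to a +/-3 minor-version skew rule over properly parsed versions instead of float conversions. A standalone illustration using the same apimachinery parser (the helper function is hypothetical):

package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

// withinSupportedSkew reports whether two version strings fall inside the
// documented +/-3 minor-version window, mirroring the check above.
func withinSupportedSkew(deschedulerVersion, serverVersion string) (bool, error) {
	d, err := utilversion.ParseGeneric(deschedulerVersion)
	if err != nil {
		return false, fmt.Errorf("parse descheduler version: %w", err)
	}
	s, err := utilversion.ParseSemantic(serverVersion)
	if err != nil {
		return false, fmt.Errorf("parse server version: %w", err)
	}
	skew := int(d.Minor()) - int(s.Minor())
	if skew < 0 {
		skew = -skew
	}
	return skew <= 3, nil
}

func main() {
	ok, _ := withinSupportedSkew("v0.30.0", "v1.30.2")
	fmt.Println(ok) // descheduler v0.30 against Kubernetes 1.30: same minor, supported
}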
|
||||
func cachedClient(
@@ -205,23 +383,12 @@ func cachedClient(
}

func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
defer span.End()

sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()

ctx, cancel := context.WithCancel(ctx)
defer cancel()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
@@ -234,103 +401,38 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
} else {
eventClient = rs.Client
}

eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()

cycleSharedInformerFactory := sharedInformerFactory
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
if err != nil {
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
return err
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

wait.NonSlidingUntil(func() {
loopStartDuration := time.Now()
defer metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeLister, nodeSelector)
// A next context is created here intentionally to avoid nesting the spans via context.
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
defer sSpan.End()
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, nodeLister, nodeSelector)
if err != nil {
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
cancel()
return
}

if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
err = descheduler.runDeschedulerLoop(sCtx, nodes)
if err != nil {
sSpan.AddEvent("Failed to run descheduler loop", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
cancel()
return
}

var client clientset.Interface
// When dry run mode is enabled, collect all the relevant objects (mostly pods) under a fake client.
// So evicting pods while running multiple strategies in a row has the cumulative effect
// as when evicting pods for real.
if rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(rs.Client, podLister, nodeLister, namespaceLister, priorityClassLister)
if err != nil {
klog.Error(err)
return
}

// create a new instance of the shared informer factory from the cached client
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
// register the pod informer, otherwise it will not get started
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
if err != nil {
klog.Errorf("build get pods assigned to node function error: %v", err)
return
}

fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())

client = fakeClient
cycleSharedInformerFactory = fakeSharedInformerFactory
} else {
client = rs.Client
}

klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
client,
evictionPolicyGroupVersion,
rs.DryRun,
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
!rs.DisableMetrics,
eventRecorder,
)

for _, profile := range deschedulerPolicy.Profiles {
currProfile, err := frameworkprofile.NewProfile(
profile,
pluginregistry.PluginRegistry,
frameworkprofile.WithClientSet(client),
frameworkprofile.WithSharedInformerFactory(cycleSharedInformerFactory),
frameworkprofile.WithPodEvictor(podEvictor),
frameworkprofile.WithGetPodsAssignedToNodeFnc(getPodsAssignedToNode),
)
if err != nil {
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
continue
}

// First deschedule
status := currProfile.RunDeschedulePlugins(ctx, nodes)
if status != nil && status.Err != nil {
klog.ErrorS(status.Err, "running deschedule extension point failed with error", "profile", profile.Name)
continue
}
// Then balance
status = currProfile.RunBalancePlugins(ctx, nodes)
if status != nil && status.Err != nil {
klog.ErrorS(status.Err, "running balance extension point failed with error", "profile", profile.Name)
continue
}
}

klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())

// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
if rs.DeschedulingInterval.Seconds() == 0 {
cancel()
@@ -362,3 +464,10 @@ func createClients(clientConnection componentbaseconfig.ClientConnectionConfigur

return kClient, eventClient, nil
}

func trimManagedFields(obj interface{}) (interface{}, error) {
if accessor, err := meta.Accessor(obj); err == nil {
accessor.SetManagedFields(nil)
}
return obj, nil
}

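trimManagedFields above is wired into the shared informer factory via informers.WithTransform, so every object entering the informer cache drops its managedFields metadata, which can cut cache memory noticeably on large clusters. A standalone sketch of the transform's effect (the Pod literal is illustrative):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// trimManagedFields mirrors the helper from this diff: it nils out
// managedFields on any object that exposes ObjectMeta.
func trimManagedFields(obj interface{}) (interface{}, error) {
	if accessor, err := meta.Accessor(obj); err == nil {
		accessor.SetManagedFields(nil)
	}
	return obj, nil
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:          "p1",
		ManagedFields: []metav1.ManagedFieldsEntry{{Manager: "kubectl"}},
	}}
	out, _ := trimManagedFields(pod)
	fmt.Println(len(out.(*v1.Pod).ManagedFields)) // 0
}
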
@@ -11,6 +11,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
apiversion "k8s.io/apimachinery/pkg/version"
fakediscovery "k8s.io/client-go/discovery/fake"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
@@ -20,6 +22,7 @@ import (
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
deschedulerversion "sigs.k8s.io/descheduler/pkg/version"
"sigs.k8s.io/descheduler/test"
)

@@ -245,6 +248,67 @@ func TestRootCancelWithNoInterval(t *testing.T) {
}
}

func TestValidateVersionCompatibility(t *testing.T) {
type testCase struct {
name string
deschedulerVersion string
serverVersion string
expectError bool
}
testCases := []testCase{
{
name: "no error when descheduler minor equals to server minor",
deschedulerVersion: "v0.26",
serverVersion: "v1.26.1",
expectError: false,
},
{
name: "no error when descheduler minor is 3 behind server minor",
deschedulerVersion: "0.23",
serverVersion: "v1.26.1",
expectError: false,
},
{
name: "no error when descheduler minor is 3 ahead of server minor",
deschedulerVersion: "v0.26",
serverVersion: "v1.26.1",
expectError: false,
},
{
name: "error when descheduler minor is 4 behind server minor",
deschedulerVersion: "v0.22",
serverVersion: "v1.26.1",
expectError: true,
},
{
name: "error when descheduler minor is 4 ahead of server minor",
deschedulerVersion: "v0.27",
serverVersion: "v1.23.1",
expectError: true,
},
{
name: "no error when using managed provider version",
deschedulerVersion: "v0.25",
serverVersion: "v1.25.12-eks-2d98532",
expectError: false,
},
}
client := fakeclientset.NewSimpleClientset()
fakeDiscovery, _ := client.Discovery().(*fakediscovery.FakeDiscovery)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeDiscovery.FakedServerVersion = &apiversion.Info{GitVersion: tc.serverVersion}
deschedulerVersion := deschedulerversion.Info{GitVersion: tc.deschedulerVersion}
err := validateVersionCompatibility(fakeDiscovery, deschedulerVersion)

hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected version compatibility behavior")
}
})
}
}

func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {

@@ -20,6 +20,8 @@ import (
"context"
"fmt"

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -30,6 +32,7 @@ import (
"sigs.k8s.io/descheduler/metrics"

eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/tracing"
)

// nodePodEvictedCount keeps count of pods evicted on node
@@ -108,41 +111,46 @@ func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
type EvictOptions struct {
// Reason allows for passing details about the specific eviction for logging.
Reason string
// ProfileName allows for passing details about profile for observability.
ProfileName string
// StrategyName allows for passing details about strategy for observability.
StrategyName string
}

// EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
strategy := ""
if ctx.Value("strategyName") != nil {
strategy = ctx.Value("strategyName").(string)
}
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
defer span.End()

if pod.Spec.NodeName != "" {
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per node reached")))
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per node reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
return false
}
}

if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per namespace reached")))
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per namespace reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
return false
}

err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
if err != nil {
// err is used only for logging purposes
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
return false
}
@@ -153,16 +161,16 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
pe.namespacePodCount[pod.Namespace]++

if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}

if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
} else {
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
reason := opts.Reason
if len(reason) == 0 {
reason = strategy
reason = opts.StrategyName
if len(reason) == 0 {
reason = "NotSet"
}

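The EvictOptions change above replaces the old pattern of reading the strategy name out of ctx.Value with explicit, typed fields, which is the usual Go remedy for untyped context values. A minimal illustration of the shape of the new call (the evict function body is a stand-in, not the descheduler implementation):

package main

import "fmt"

// EvictOptions mirrors the struct introduced in this diff.
type EvictOptions struct {
	Reason       string
	ProfileName  string
	StrategyName string
}

// evict is a stand-in showing how the options travel as plain data.
func evict(podName string, opts EvictOptions) {
	fmt.Printf("evicting %s (strategy=%s, profile=%s, reason=%q)\n",
		podName, opts.StrategyName, opts.ProfileName, opts.Reason)
}

func main() {
	evict("p1", EvictOptions{
		Reason:       "node over-utilized",
		ProfileName:  "default",
		StrategyName: "LowNodeUtilization",
	})
}
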
@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
@@ -120,8 +121,7 @@ func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v
}
// Check if the pod can fit on a node based off its requests
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
if !ok {
if ok, reqErrors := fitsRequest(nodeIndexer, pod, node); !ok {
errors = append(errors, reqErrors...)
}
}
@@ -130,6 +130,13 @@ func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v
errors = append(errors, fmt.Errorf("node is not schedulable"))
}

// Check if pod matches inter-pod anti-affinity rule of pod on node
if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil {
errors = append(errors, err)
} else if match {
errors = append(errors, fmt.Errorf("pod matches inter-pod anti-affinity rule of other pod on node"))
}

return errors
}

@@ -146,13 +153,11 @@ func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
klog.V(4).InfoS("Pod does not fit on any other node",
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
}

return false
}

@@ -164,13 +169,11 @@ func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod,
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
klog.V(4).InfoS("Pod does not fit on any node",
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
}

return false
}

@@ -181,12 +184,11 @@ func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.P
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}

klog.V(4).InfoS("Pod does not fit on current node",
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())

return false
}

@@ -294,3 +296,56 @@ func IsBasicResource(name v1.ResourceName) bool {
return false
}
}

// GetNodeWeightGivenPodPreferredAffinity returns the weight
// that the pod gives to a node by analyzing the soft node affinity of that pod
// (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
func GetNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, node *v1.Node) int32 {
totalWeight, err := utils.GetNodeWeightGivenPodPreferredAffinity(pod, node)
if err != nil {
return 0
}
return totalWeight
}

// GetBestNodeWeightGivenPodPreferredAffinity returns the best weight
// (maximum one) that the pod gives to the best node by analyzing the soft node affinity
// of that pod (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
func GetBestNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, nodes []*v1.Node) int32 {
var bestWeight int32 = 0
for _, node := range nodes {
weight := GetNodeWeightGivenPodPreferredAffinity(pod, node)
if weight > bestWeight {
bestWeight = weight
}
}
return bestWeight
}

// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) bool {
matches, err := utils.PodMatchNodeSelector(pod, node)
if err != nil {
return false
}
return matches
}

// podMatchesInterPodAntiAffinity checks if the pod matches the anti-affinity rule
// of another pod that is already on the given node.
// If a match is found, it returns true.
func podMatchesInterPodAntiAffinity(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil {
return false, nil
}

podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
if err != nil {
return false, fmt.Errorf("error listing all pods: %v", err)
}

podsInANamespace := podutil.GroupByNamespace(podsOnNode)
nodeMap := utils.CreateNodeMap([]*v1.Node{node})

return utils.CheckPodsWithAntiAffinityExist(pod, podsInANamespace, nodeMap), nil
}

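The new podMatchesInterPodAntiAffinity check reduces to three steps: list the pods already on the candidate node, group them by namespace, and test the evicted pod's anti-affinity terms against each group. The grouping helper, GroupByNamespace, appears later in this diff; a compact standalone sketch of that step (test data is illustrative):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// groupByNamespace mirrors podutil.GroupByNamespace from this diff.
func groupByNamespace(pods []*v1.Pod) map[string][]*v1.Pod {
	m := make(map[string][]*v1.Pod)
	for _, pod := range pods {
		m[pod.Namespace] = append(m[pod.Namespace], pod)
	}
	return m
}

func main() {
	pods := []*v1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "default"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "kube-system"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "c", Namespace: "default"}},
	}
	for ns, group := range groupByNamespace(pods) {
		fmt.Println(ns, len(group)) // default 2, kube-system 1 (map order varies)
	}
}
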
@@ -754,7 +754,11 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
}

func TestNodeFit(t *testing.T) {
node := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, nil)
node := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
})
tests := []struct {
description string
pod *v1.Pod
@@ -781,6 +785,22 @@ func TestNodeFit(t *testing.T) {
},
err: errors.New("insufficient pods"),
},
{
description: "matches inter-pod anti-affinity rule of pod on node",
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
node: node,
podsOnNode: []*v1.Pod{
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo", "bar"),
},
err: errors.New("pod matches inter-pod anti-affinity rule of other pod on node"),
},
{
description: "pod fits on node",
pod: test.BuildTestPod("p1", 1000, 1000, "", func(pod *v1.Pod) {}),
node: node,
podsOnNode: []*v1.Pod{},
err: nil,
},
}

for _, tc := range tests {
@@ -804,13 +824,86 @@ func TestNodeFit(t *testing.T) {

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
if errs := NodeFit(getPodsAssignedToNode, tc.pod, tc.node); (len(errs) == 0 && tc.err != nil) || errs[0].Error() != tc.err.Error() {
errs := NodeFit(getPodsAssignedToNode, tc.pod, tc.node)
if (len(errs) == 0 && tc.err != nil) || (len(errs) > 0 && errs[0].Error() != tc.err.Error()) {
t.Errorf("Test %#v failed, got %v, expect %v", tc.description, errs, tc.err)
}
})
}
}

func TestBestPodNodeAffinityWeight(t *testing.T) {
defaultPod := test.BuildTestPod("p1", 0, 0, "node1", func(p *v1.Pod) {
p.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "key1",
Operator: "In",
Values: []string{"value1"},
},
},
},
},
},
},
}
})
tests := []struct {
description string
pod *v1.Pod
nodes []*v1.Node
expectedWeight int32
}{
{
description: "No node matches the preferred affinity",
pod: defaultPod,
nodes: []*v1.Node{
test.BuildTestNode("node2", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"key2": "value2",
}
}),
test.BuildTestNode("node3", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"key3": "value3",
}
}),
},
expectedWeight: 0,
},
{
description: "A single node matches the preferred affinity",
pod: defaultPod,
nodes: []*v1.Node{
test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"key1": "value1",
}
}),
test.BuildTestNode("node2", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"key2": "value2",
}
}),
},
expectedWeight: 10,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
bestWeight := GetBestNodeWeightGivenPodPreferredAffinity(tc.pod, tc.nodes)
if bestWeight != tc.expectedWeight {
t.Errorf("Test %#v failed", tc.description)
}
})
}
}

// createResourceList builds a small resource list of core resources
func createResourceList(cpu, memory, ephemeralStorage int64) v1.ResourceList {
resourceList := make(map[v1.ResourceName]resource.Quantity)

@@ -53,8 +53,8 @@ func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {

type Options struct {
filter FilterFunc
includedNamespaces sets.String
excludedNamespaces sets.String
includedNamespaces sets.Set[string]
excludedNamespaces sets.Set[string]
labelSelector *metav1.LabelSelector
}

@@ -71,13 +71,13 @@ func (o *Options) WithFilter(filter FilterFunc) *Options {
}

// WithNamespaces sets included namespaces
func (o *Options) WithNamespaces(namespaces sets.String) *Options {
func (o *Options) WithNamespaces(namespaces sets.Set[string]) *Options {
o.includedNamespaces = namespaces
return o
}

// WithoutNamespaces sets excluded namespaces
func (o *Options) WithoutNamespaces(namespaces sets.String) *Options {
func (o *Options) WithoutNamespaces(namespaces sets.Set[string]) *Options {
o.excludedNamespaces = namespaces
return o
}
@@ -142,21 +142,39 @@ func BuildGetPodsAssignedToNodeFunc(podInformer cache.SharedIndexInformer) (GetP
if err != nil {
return nil, err
}
pods := make([]*v1.Pod, 0, len(objs))
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
}
if filter(pod) {
pods = append(pods, pod)
}
}
return pods, nil
return ConvertToPods(objs, filter), nil
}
return getPodsAssignedToNode, nil
}

func ConvertToPods(objs []interface{}, filter FilterFunc) []*v1.Pod {
pods := make([]*v1.Pod, 0, len(objs))
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
}
if filter == nil || filter(pod) {
pods = append(pods, pod)
}
}
return pods
}

// ListPodsOnNodes returns all pods on given nodes.
func ListPodsOnNodes(nodes []*v1.Node, getPodsAssignedToNode GetPodsAssignedToNodeFunc, filter FilterFunc) ([]*v1.Pod, error) {
pods := make([]*v1.Pod, 0)
for _, node := range nodes {
podsOnNode, err := ListPodsOnANode(node.Name, getPodsAssignedToNode, filter)
if err != nil {
return nil, err
}

pods = append(pods, podsOnNode...)
}
return pods, nil
}

// ListPodsOnANode lists all pods on a node.
// It also accepts a "filter" function which can be used to further limit the pods that are returned.
// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
@@ -187,11 +205,28 @@ func ListAllPodsOnANode(
return pods, nil
}

func GroupByNamespace(pods []*v1.Pod) map[string][]*v1.Pod {
m := make(map[string][]*v1.Pod)
for i := 0; i < len(pods); i++ {
pod := pods[i]
m[pod.Namespace] = append(m[pod.Namespace], pod)
}
return m
}

// OwnerRef returns the ownerRefList for the pod.
func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
return pod.ObjectMeta.GetOwnerReferences()
}

func OwnerRefUIDs(pod *v1.Pod) []string {
var ownerRefUIDs []string
for _, ownerRef := range OwnerRef(pod) {
ownerRefUIDs = append(ownerRefUIDs, string(ownerRef.UID))
}
return ownerRefUIDs
}

func IsBestEffortPod(pod *v1.Pod) bool {
return utils.GetPodQOS(pod) == v1.PodQOSBestEffort
}
@@ -234,3 +269,12 @@ func SortPodsBasedOnAge(pods []*v1.Pod) {
return pods[i].CreationTimestamp.Before(&pods[j].CreationTimestamp)
})
}

func GroupByNodeName(pods []*v1.Pod) map[string][]*v1.Pod {
m := make(map[string][]*v1.Pod)
for i := 0; i < len(pods); i++ {
pod := pods[i]
m[pod.Spec.NodeName] = append(m[pod.Spec.NodeName], pod)
}
return m
}

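The Options struct above migrates from the deprecated sets.String alias to the generic sets.Set[string] in k8s.io/apimachinery/pkg/util/sets, so construction at call sites changes as well. A small sketch of the before/after:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Deprecated style: sets.NewString("kube-system", "monitoring")
	// Generic style matching this diff:
	included := sets.New[string]("kube-system", "monitoring")
	fmt.Println(included.Has("kube-system")) // true
	fmt.Println(sets.List(included))         // [kube-system monitoring], sorted
}
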
@@ -176,3 +176,67 @@ func TestSortPodsBasedOnAge(t *testing.T) {
}
}

func TestGroupByNodeName(t *testing.T) {
tests := []struct {
name string
pods []*v1.Pod
expMap map[string][]*v1.Pod
}{
{
name: "list of pods is empty",
pods: []*v1.Pod{},
expMap: map[string][]*v1.Pod{},
},
{
name: "pods are on same node",
pods: []*v1.Pod{
{Spec: v1.PodSpec{
NodeName: "node1",
}},
{Spec: v1.PodSpec{
NodeName: "node1",
}},
},
expMap: map[string][]*v1.Pod{"node1": {
{Spec: v1.PodSpec{
NodeName: "node1",
}},
{Spec: v1.PodSpec{
NodeName: "node1",
}},
}},
},
{
name: "pods are on different nodes",
pods: []*v1.Pod{
{Spec: v1.PodSpec{
NodeName: "node1",
}},
{Spec: v1.PodSpec{
NodeName: "node2",
}},
},
expMap: map[string][]*v1.Pod{
"node1": {
{Spec: v1.PodSpec{
NodeName: "node1",
}},
},
"node2": {
{Spec: v1.PodSpec{
NodeName: "node2",
}},
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
resultMap := GroupByNodeName(test.pods)
if !reflect.DeepEqual(resultMap, test.expMap) {
t.Errorf("Expected %v node map, got %v", test.expMap, resultMap)
}
})
}
}

@@ -21,6 +21,8 @@ import (
"fmt"
"os"

utilerrors "k8s.io/apimachinery/pkg/util/errors"

"k8s.io/apimachinery/pkg/runtime"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@@ -133,29 +135,22 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
}

func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginregistry.Registry) error {
var errorsInProfiles error
var errorsInProfiles []error
for _, profile := range in.Profiles {
for _, pluginConfig := range profile.PluginConfigs {
if _, ok := registry[pluginConfig.Name]; ok {
if _, ok := registry[pluginConfig.Name]; !ok {
errorsInProfiles = fmt.Errorf("%w: %s", errorsInProfiles, fmt.Sprintf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
continue
}
if _, ok := registry[pluginConfig.Name]; !ok {
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
continue
}

pluginUtilities := registry[pluginConfig.Name]
if pluginUtilities.PluginArgValidator == nil {
continue
}
err := pluginUtilities.PluginArgValidator(pluginConfig.Args)
if err != nil {
if errorsInProfiles == nil {
errorsInProfiles = fmt.Errorf("in profile %s: %s", profile.Name, err.Error())
} else {
errorsInProfiles = fmt.Errorf("%w: %s", errorsInProfiles, fmt.Sprintf("in profile %s: %s", profile.Name, err.Error()))
}
}
pluginUtilities := registry[pluginConfig.Name]
if pluginUtilities.PluginArgValidator == nil {
continue
}
if err := pluginUtilities.PluginArgValidator(pluginConfig.Args); err != nil {
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
}
}
}
return errorsInProfiles
return utilerrors.NewAggregate(errorsInProfiles)
}

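Collecting the validation failures into a slice and returning utilerrors.NewAggregate produces the bracketed, comma-separated message asserted in the updated test expectation below, and it returns nil for an empty slice, so the function can return the aggregate unconditionally. A standalone sketch:

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	var errs []error
	errs = append(errs, errors.New("in profile A: plugin X in pluginConfig not registered"))
	errs = append(errs, errors.New("in profile B: invalid args"))

	agg := utilerrors.NewAggregate(errs)
	fmt.Println(agg) // [in profile A: plugin X in pluginConfig not registered, in profile B: invalid args]

	// An empty slice aggregates to nil, matching "no validation errors".
	fmt.Println(utilerrors.NewAggregate(nil) == nil) // true
}
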
@@ -21,6 +21,7 @@ import (
"testing"

"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
fakeclientset "k8s.io/client-go/kubernetes/fake"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
@@ -133,6 +134,71 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
},
},
},
{
description: "convert global policy fields to defaultevictor",
policy: &v1alpha1.DeschedulerPolicy{
EvictFailedBarePods: utilpointer.Bool(true),
EvictLocalStoragePods: utilpointer.Bool(true),
EvictSystemCriticalPods: utilpointer.Bool(true),
EvictDaemonSetPods: utilpointer.Bool(true),
IgnorePVCPods: utilpointer.Bool(true),
Strategies: v1alpha1.StrategyList{
removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
Enabled: true,
Params: &v1alpha1.StrategyParameters{
Namespaces: &v1alpha1.Namespaces{
Exclude: []string{
"test2",
},
},
},
},
},
},
result: &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
EvictLocalStoragePods: true,
EvictDaemonSetPods: true,
EvictSystemCriticalPods: true,
IgnorePvcPods: true,
EvictFailedBarePods: true,
PriorityThreshold: &api.PriorityThreshold{
Value: nil,
},
},
},
{
Name: removeduplicates.PluginName,
Args: &removeduplicates.RemoveDuplicatesArgs{
Namespaces: &api.Namespaces{
Exclude: []string{
"test2",
},
},
},
},
},
Plugins: api.Plugins{
Balance: api.PluginSet{
Enabled: []string{removeduplicates.PluginName},
},
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
},
},
},
},
{
description: "convert all strategies",
policy: &v1alpha1.DeschedulerPolicy{
@@ -399,7 +465,10 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
defaultEvictorPluginConfig,
{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{},
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
},
Plugins: api.Plugins{
@@ -714,7 +783,8 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
IncludeSoftConstraints: true,
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
},
@@ -857,6 +927,79 @@ strategies:
},
},
},
{
description: "v1alpha1 to internal with priorityThreshold",
policy: []byte(`apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 5
namespaces:
include:
- "testleaderelection-a"
thresholdPriority: null
thresholdPriorityClassName: prioritym
`),
result: &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: fmt.Sprintf("strategy-%s-profile", podlifetime.PluginName),
PluginConfigs: []api.PluginConfig{
{
Name: "DefaultEvictor",
Args: &defaultevictor.DefaultEvictorArgs{
PriorityThreshold: &api.PriorityThreshold{
Value: utilpointer.Int32(0),
},
},
},
{
Name: podlifetime.PluginName,
Args: &podlifetime.PodLifeTimeArgs{
Namespaces: &api.Namespaces{
Include: []string{"testleaderelection-a"},
},
MaxPodLifeTimeSeconds: utilpointer.Uint(5),
},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
Deschedule: api.PluginSet{
Enabled: []string{podlifetime.PluginName},
},
},
},
},
},
},
{
description: "v1alpha1 to internal with priorityThreshold value and name should return error",
policy: []byte(`apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 5
namespaces:
include:
- "testleaderelection-a"
thresholdPriority: 222
thresholdPriorityClassName: prioritym
`),
result: nil,
err: fmt.Errorf("failed decoding descheduler's policy config \"filename\": priority threshold misconfigured for plugin PodLifeTime"),
},
{
description: "v1alpha2 to internal",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
@@ -869,6 +1012,7 @@ profiles:
evictSystemCriticalPods: true
evictFailedBarePods: true
evictLocalStoragePods: true
evictDaemonSetPods: true
nodeFit: true
- name: "RemovePodsHavingTooManyRestarts"
args:
@@ -890,6 +1034,7 @@ profiles:
EvictSystemCriticalPods: true,
EvictFailedBarePods: true,
EvictLocalStoragePods: true,
EvictDaemonSetPods: true,
PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
NodeFit: true,
},
@@ -925,7 +1070,7 @@ profiles:
if err != nil {
if tc.err == nil {
t.Errorf("unexpected error: %s.", err.Error())
} else {
} else if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
}
}
@@ -985,7 +1130,7 @@ func TestValidateDeschedulerConfiguration(t *testing.T) {
},
},
},
result: fmt.Errorf("in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set: in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set"),
result: fmt.Errorf("[in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set, in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set]"),
},
}

@@ -1021,6 +1166,7 @@ profiles:
evictSystemCriticalPods: true
evictFailedBarePods: true
evictLocalStoragePods: true
evictDaemonSetPods: true
nodeFit: true
- name: "RemoveFailedPods"
plugins:
@@ -1045,6 +1191,7 @@ profiles:
EvictSystemCriticalPods: true,
EvictFailedBarePods: true,
EvictLocalStoragePods: true,
EvictDaemonSetPods: true,
PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
NodeFit: true,
},
@@ -1083,6 +1230,7 @@ profiles:
evictSystemCriticalPods: true
evictFailedBarePods: true
evictLocalStoragePods: true
evictDaemonSetPods: true
nodeFit: true
- name: "RemoveFailedPods"
plugins:
@@ -1101,6 +1249,7 @@ profiles:
EvictSystemCriticalPods: true,
EvictFailedBarePods: true,
EvictLocalStoragePods: true,
EvictDaemonSetPods: true,
PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
NodeFit: true,
},

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -16,13 +16,15 @@ package defaultevictor
import (
// "context"
"context"
"errors"
"fmt"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/errors"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -45,7 +47,7 @@ type constraint func(pod *v1.Pod) error
// This plugin is only meant to customize other actions (extension points) of the evictor,
// like filtering, sorting, and other ones that might be relevant in the future
type DefaultEvictor struct {
args runtime.Object
args *DefaultEvictorArgs
constraints []constraint
handle frameworktypes.Handle
}
@@ -68,9 +70,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
}

ev := &DefaultEvictor{}
ev.handle = handle
ev.args = defaultEvictorArgs
ev := &DefaultEvictor{
handle: handle,
args: defaultEvictorArgs,
}

if defaultEvictorArgs.EvictFailedBarePods {
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
@@ -122,6 +125,15 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil
})
}
if !defaultEvictorArgs.EvictDaemonSetPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
if utils.IsDaemonsetPod(ownerRefList) {
return fmt.Errorf("pod is related to daemonset and descheduler is not configured with evictDaemonSetPods")
}
return nil
})
}
if defaultEvictorArgs.IgnorePvcPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithPVC(pod) {
@@ -143,6 +155,36 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
})
}

if defaultEvictorArgs.MinReplicas > 1 {
indexName := "metadata.ownerReferences"
indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
if err != nil {
return nil, err
}
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if len(pod.OwnerReferences) == 0 {
return nil
}

if len(pod.OwnerReferences) > 1 {
klog.V(5).InfoS("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
return nil
}

ownerRef := pod.OwnerReferences[0]
objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
if err != nil {
return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
}

if uint(len(objs)) < defaultEvictorArgs.MinReplicas {
return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), defaultEvictorArgs.MinReplicas)
}

return nil
})
}

return ev, nil
}

@@ -152,15 +194,14 @@ func (d *DefaultEvictor) Name() string {
}

func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
defaultEvictorArgs := d.args.(*DefaultEvictorArgs)
if defaultEvictorArgs.NodeFit {
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), defaultEvictorArgs.NodeSelector)
if d.args.NodeFit {
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), d.args.NodeSelector)
if err != nil {
klog.ErrorS(fmt.Errorf("Pod fails the following checks"), "pod", klog.KObj(pod))
klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
return false
}
if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
klog.ErrorS(fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable"), "pod", klog.KObj(pod))
klog.InfoS("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
return false
}
return true
@@ -175,11 +216,6 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
return true
}

ownerRefList := podutil.OwnerRef(pod)
if utils.IsDaemonsetPod(ownerRefList) {
checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
}

if utils.IsMirrorPod(pod) {
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
}
@@ -199,9 +235,28 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
}

if len(checkErrs) > 0 {
klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
return false
}

return true
}

func getPodIndexerByOwnerRefs(indexName string, handle frameworktypes.Handle) (cache.Indexer, error) {
podInformer := handle.SharedInformerFactory().Core().V1().Pods().Informer()
if err := podInformer.AddIndexers(cache.Indexers{
indexName: func(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return []string{}, errors.New("unexpected object")
}

return podutil.OwnerRefUIDs(pod), nil
},
}); err != nil {
return nil, err
}

indexer := podInformer.GetIndexer()
return indexer, nil
}

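The minReplicas constraint above counts a pod's sibling replicas through a custom informer index keyed by owner UID, avoiding a full pod list on every filter call. A reduced sketch of the same indexing idea against a bare cache.Indexer (the owner UID and pods are illustrative):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/cache"
)

const indexName = "metadata.ownerReferences"

func main() {
	// Index pods by the UIDs of their owner references, as
	// getPodIndexerByOwnerRefs does on the pod informer.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		indexName: func(obj interface{}) ([]string, error) {
			pod := obj.(*v1.Pod)
			uids := []string{}
			for _, ref := range pod.OwnerReferences {
				uids = append(uids, string(ref.UID))
			}
			return uids, nil
		},
	})

	owner := types.UID("rs-1234")
	for _, name := range []string{"p1", "p2"} {
		_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{
			Name:            name,
			Namespace:       "default",
			OwnerReferences: []metav1.OwnerReference{{UID: owner}},
		}})
	}

	siblings, _ := indexer.ByIndex(indexName, string(owner))
	fmt.Println(len(siblings)) // 2 replicas share the owner
}
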
@@ -20,6 +20,7 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -363,6 +364,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
nodeTaintKey := "hardware"
|
||||
nodeTaintValue := "gpu"
|
||||
|
||||
ownerRefUUID := uuid.NewUUID()
|
||||
|
||||
type testCase struct {
|
||||
description string
|
||||
pods []*v1.Pod
|
||||
@@ -372,6 +375,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods bool
|
||||
priorityThreshold *int32
|
||||
nodeFit bool
|
||||
minReplicas uint
|
||||
result bool
|
||||
}
|
||||
|
||||
@@ -527,7 +531,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it is part of a daemonSet",
|
||||
description: "Pod not evicted because it is part of a daemonSet",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -538,7 +542,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
@@ -549,7 +553,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it is a mirror poddsa",
|
||||
description: "Pod not evicted because it is a mirror poddsa",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p10", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -560,7 +564,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -572,7 +576,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it has system critical priority",
|
||||
description: "Pod not evicted because it has system critical priority",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -584,7 +588,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p13", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -599,7 +603,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it has a priority higher than the configured priority threshold",
|
||||
description: "Pod not evicted because it has a priority higher than the configured priority threshold",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p14", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -611,7 +615,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -624,7 +628,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true",
|
||||
description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -636,7 +640,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -649,7 +653,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -661,7 +665,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -704,6 +708,47 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "minReplicas of 2, owner with 2 replicas, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
},
|
||||
minReplicas: 2,
|
||||
result: true,
|
||||
}, {
|
||||
description: "minReplicas of 3, owner with 2 replicas, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
},
|
||||
minReplicas: 3,
|
||||
result: false,
|
||||
}, {
|
||||
description: "minReplicas of 2, multiple owners, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = append(test.GetNormalPodOwnerRefList(), test.GetNormalPodOwnerRefList()...)
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
minReplicas: 2,
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -741,7 +786,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: test.priorityThreshold,
|
||||
},
|
||||
NodeFit: test.nodeFit,
|
||||
NodeFit: test.nodeFit,
|
||||
MinReplicas: test.minReplicas,
|
||||
}
|
||||
|
||||
evictorPlugin, err := New(
|
||||
|
||||
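A note on the three minReplicas cases added above: they imply the semantics of the new DefaultEvictor minReplicas option, where eviction is allowed only when the pod's sole owner currently backs at least minReplicas pods, and the check appears to be skipped for pods with multiple owners (which is why the multi-owner case still yields result: true despite its description). A minimal standalone sketch of that reading, assuming these semantics; the function and variable names here are illustrative, not the plugin's actual identifiers:

package main

import "fmt"

// evictable mirrors the gate the test cases above imply; the real plugin
// counts pods per owner UID.
func evictable(ownerUIDs []string, podsPerOwner map[string]int, minReplicas int) bool {
	if minReplicas <= 1 || len(ownerUIDs) != 1 {
		// Check disabled, or the pod has multiple owners: do not block eviction.
		return true
	}
	return podsPerOwner[ownerUIDs[0]] >= minReplicas
}

func main() {
	pods := map[string]int{"owner-a": 2}
	fmt.Println(evictable([]string{"owner-a"}, pods, 2))            // true: 2 >= 2
	fmt.Println(evictable([]string{"owner-a"}, pods, 3))            // false: only 2 replicas
	fmt.Println(evictable([]string{"owner-a", "owner-b"}, pods, 2)) // true: multi-owner check skipped
}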
@@ -31,6 +31,9 @@ func SetDefaults_DefaultEvictorArgs(obj runtime.Object) {
if !args.EvictLocalStoragePods {
args.EvictLocalStoragePods = false
}
+ if !args.EvictDaemonSetPods {
+ args.EvictDaemonSetPods = false
+ }
if !args.EvictSystemCriticalPods {
args.EvictSystemCriticalPods = false
}

@@ -35,6 +35,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
want: &DefaultEvictorArgs{
NodeSelector: "",
EvictLocalStoragePods: false,
+ EvictDaemonSetPods: false,
EvictSystemCriticalPods: false,
IgnorePvcPods: false,
EvictFailedBarePods: false,
@@ -48,6 +49,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
in: &DefaultEvictorArgs{
NodeSelector: "NodeSelector",
EvictLocalStoragePods: true,
+ EvictDaemonSetPods: true,
EvictSystemCriticalPods: true,
IgnorePvcPods: true,
EvictFailedBarePods: true,
@@ -60,6 +62,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
want: &DefaultEvictorArgs{
NodeSelector: "NodeSelector",
EvictLocalStoragePods: true,
+ EvictDaemonSetPods: true,
EvictSystemCriticalPods: true,
IgnorePvcPods: true,
EvictFailedBarePods: true,

@@ -27,10 +27,12 @@ type DefaultEvictorArgs struct {

NodeSelector string `json:"nodeSelector"`
EvictLocalStoragePods bool `json:"evictLocalStoragePods"`
+ EvictDaemonSetPods bool `json:"evictDaemonSetPods"`
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods"`
IgnorePvcPods bool `json:"ignorePvcPods"`
EvictFailedBarePods bool `json:"evictFailedBarePods"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold"`
NodeFit bool `json:"nodeFit"`
+ MinReplicas uint `json:"minReplicas"`
}

@@ -16,15 +16,21 @@ package defaultevictor
import (
"fmt"

+ "k8s.io/klog/v2"
+
"k8s.io/apimachinery/pkg/runtime"
)

func ValidateDefaultEvictorArgs(obj runtime.Object) error {
args := obj.(*DefaultEvictorArgs)

- if args.PriorityThreshold != nil && len(args.PriorityThreshold.Name) > 0 {
+ if args.PriorityThreshold != nil && args.PriorityThreshold.Value != nil && len(args.PriorityThreshold.Name) > 0 {
return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set, got %v", args)
}
+
+ if args.MinReplicas == 1 {
+ klog.V(4).Info("DefaultEvictor minReplicas must be greater than 1 to check for min pods during eviction. This check will be ignored during eviction.")
+ }

return nil
}

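The tightened condition above makes PriorityThreshold.Name and PriorityThreshold.Value mutually exclusive only when both are actually set. A standalone sketch of the rule, with a local stand-in type mirroring the shape used in this diff (not the full descheduler API):

package main

import "fmt"

// priorityThreshold mirrors api.PriorityThreshold as used above.
type priorityThreshold struct {
	Value *int32
	Name  string
}

func validate(pt *priorityThreshold) error {
	// Same rule as ValidateDefaultEvictorArgs: reject only when Name and
	// Value are both set; either one alone (or neither) is fine.
	if pt != nil && pt.Value != nil && len(pt.Name) > 0 {
		return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set")
	}
	return nil
}

func main() {
	v := int32(1000)
	fmt.Println(validate(&priorityThreshold{Value: &v, Name: "high-priority"})) // error
	fmt.Println(validate(&priorityThreshold{Name: "high-priority"}))            // <nil>
}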
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
+ "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"

podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -144,6 +145,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
sourceNodes,
highNodes,
h.handle.Evictor(),
+ evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
h.podFilter,
resourceNames,
continueEvictionCond)

@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
+ "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
@@ -191,6 +192,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
sourceNodes,
lowNodes,
l.handle.Evictor(),
+ evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
l.podFilter,
resourceNames,
continueEvictionCond)

@@ -18,6 +18,7 @@ package nodeutilization

import (
"context"
+ "math"
"sort"

"sigs.k8s.io/descheduler/pkg/api"
@@ -163,6 +164,10 @@ func resourceThreshold(nodeCapacity v1.ResourceList, resourceName v1.ResourceNam
return resource.NewQuantity(resourceCapacityFraction(resourceCapacityQuantity.Value()), defaultFormat)
}

+ func roundTo2Decimals(percentage float64) float64 {
+ return math.Round(percentage*100) / 100
+ }
+
func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
nodeCapacity := nodeUsage.node.Status.Capacity
if len(nodeUsage.node.Status.Allocatable) > 0 {
@@ -174,6 +179,7 @@ func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
cap := nodeCapacity[resourceName]
if !cap.IsZero() {
resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.MilliValue()) / float64(cap.MilliValue())
+ resourceUsagePercentage[resourceName] = roundTo2Decimals(resourceUsagePercentage[resourceName])
}
}

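The roundTo2Decimals helper added above trims usage percentages to two decimal places before they are stored. A standalone sketch of the arithmetic:

package main

import (
	"fmt"
	"math"
)

// Same shape as the roundTo2Decimals helper in the diff above.
func roundTo2Decimals(percentage float64) float64 {
	return math.Round(percentage*100) / 100
}

func main() {
	// E.g. 300m of CPU used on a 7000m node: 100 * 300 / 7000 = 4.2857...
	usage := 100 * float64(300) / float64(7000)
	fmt.Println(usage)                   // 4.285714285714286
	fmt.Println(roundTo2Decimals(usage)) // 4.29
}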
@@ -216,6 +222,7 @@ func evictPodsFromSourceNodes(
evictableNamespaces *api.Namespaces,
sourceNodes, destinationNodes []NodeInfo,
podEvictor frameworktypes.Evictor,
+ evictOptions evictions.EvictOptions,
podFilter func(pod *v1.Pod) bool,
resourceNames []v1.ResourceName,
continueEviction continueEvictionCond,
@@ -267,7 +274,7 @@ func evictPodsFromSourceNodes(
klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
- evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, continueEviction)
+ evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, evictOptions, continueEviction)

}
}
@@ -280,11 +287,12 @@ func evictPods(
totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
taintsOfLowNodes map[string][]v1.Taint,
podEvictor frameworktypes.Evictor,
+ evictOptions evictions.EvictOptions,
continueEviction continueEvictionCond,
) {
- var excludedNamespaces sets.String
+ var excludedNamespaces sets.Set[string]
if evictableNamespaces != nil {
- excludedNamespaces = sets.NewString(evictableNamespaces.Exclude...)
+ excludedNamespaces = sets.New(evictableNamespaces.Exclude...)
}

if continueEviction(nodeInfo, totalAvailableUsage) {
@@ -304,7 +312,7 @@ func evictPods(
}

if preEvictionFilterWithOptions(pod) {
- if podEvictor.Evict(ctx, pod, evictions.EvictOptions{}) {
+ if podEvictor.Evict(ctx, pod, evictOptions) {
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))

for name := range totalAvailableUsage {

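The sets.String to sets.Set[string] change above (repeated across the plugins below) moves to the generic set type in k8s.io/apimachinery/pkg/util/sets. A minimal sketch of the replacement API:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Old: sets.NewString(...) returning sets.String.
	// New: sets.New(...) returning sets.Set[string].
	excluded := sets.New("kube-system", "monitoring")

	fmt.Println(excluded.Has("kube-system")) // true
	// The generic type has no List method; UnsortedList returns the
	// elements in arbitrary order, and the package-level sets.List(s)
	// returns them sorted.
	fmt.Println(excluded.UnsortedList())
}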
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"

frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -49,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type PodLifeTimeArgs, got %T", args)
}

- var includedNamespaces, excludedNamespaces sets.String
+ var includedNamespaces, excludedNamespaces sets.Set[string]
if podLifeTimeArgs.Namespaces != nil {
- includedNamespaces = sets.NewString(podLifeTimeArgs.Namespaces.Include...)
- excludedNamespaces = sets.NewString(podLifeTimeArgs.Namespaces.Exclude...)
+ includedNamespaces = sets.New(podLifeTimeArgs.Namespaces.Include...)
+ excludedNamespaces = sets.New(podLifeTimeArgs.Namespaces.Exclude...)
}

// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -67,17 +68,24 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
}

podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
- podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
- return podAgeSeconds > *podLifeTimeArgs.MaxPodLifeTimeSeconds
+ podAgeSeconds := int(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
+ return podAgeSeconds > int(*podLifeTimeArgs.MaxPodLifeTimeSeconds)
})

if len(podLifeTimeArgs.States) > 0 {
- states := sets.NewString(podLifeTimeArgs.States...)
+ states := sets.New(podLifeTimeArgs.States...)
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
+ // Pod Status Phase
+ if states.Has(string(pod.Status.Phase)) {
+ return true
+ }
+
+ // Pod Status Reason
if states.Has(pod.Status.Reason) {
return true
}

+ // Container Status Reason
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
return true
@@ -106,7 +114,7 @@ func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framewo
nodeMap := make(map[string]*v1.Node, len(nodes))

for _, node := range nodes {
- klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
+ klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -125,7 +133,7 @@ func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framewo

for _, pod := range podsToEvict {
if !d.handle.Evictor().NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
- d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+ d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
}
}

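The podAgeSeconds change above swaps uint for int. Presumably this guards against pods whose creation timestamp lies slightly in the future (for example, clock skew between nodes): the old uint conversion turned the negative duration into a huge value, making such pods look ancient. A standalone sketch of the pitfall, under that assumption about the intent:

package main

import (
	"fmt"
	"time"
)

func main() {
	// A pod "created" 5s in the future, e.g. due to clock skew.
	creation := time.Now().Add(5 * time.Second)
	seconds := time.Since(creation).Seconds() // roughly -5

	// Old: converting a negative float64 to uint is implementation-defined
	// in Go and yields a huge value on common platforms.
	fmt.Println(uint(seconds))

	// New: the int conversion keeps the sign, so a future-dated pod is
	// simply younger than any MaxPodLifeTimeSeconds threshold.
	fmt.Println(int(seconds)) // -5
}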
@@ -28,6 +28,8 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/events"
+ utilptr "k8s.io/utils/ptr"
+
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
@@ -71,7 +73,7 @@ func TestPodLifeTime(t *testing.T) {
p5.ObjectMeta.CreationTimestamp = newerPodCreationTime
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p6.Namespace = "dev"
- p6.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(time.Second * 605))
+ p6.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(-time.Second * 605))

ownerRef3 := test.GetReplicaSetOwnerRefList()
p5.ObjectMeta.OwnerReferences = ownerRef3
@@ -83,7 +85,7 @@ func TestPodLifeTime(t *testing.T) {
p7.ObjectMeta.CreationTimestamp = newerPodCreationTime
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
p8.Namespace = "dev"
- p8.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(time.Second * 595))
+ p8.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(-time.Second * 595))

ownerRef4 := test.GetReplicaSetOwnerRefList()
p5.ObjectMeta.OwnerReferences = ownerRef4
@@ -139,6 +141,12 @@ func TestPodLifeTime(t *testing.T) {
p14.DeletionTimestamp = &metav1.Time{}
p15.DeletionTimestamp = &metav1.Time{}

+ p16 := test.BuildTestPod("p16", 100, 0, node1.Name, nil)
+ p16.Namespace = "dev"
+ p16.ObjectMeta.CreationTimestamp = olderPodCreationTime
+ p16.Status.Phase = v1.PodUnknown
+ p16.ObjectMeta.OwnerReferences = ownerRef1
+
var maxLifeTime uint = 600
testCases := []struct {
description string
@@ -149,6 +157,7 @@ func TestPodLifeTime(t *testing.T) {
ignorePvcPods bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
+ applyPodsFunc func(pods []*v1.Pod)
}{
{
description: "Two pods in the `dev` Namespace, 1 is new and 1 very is old. 1 should be evicted.",
@@ -303,7 +312,7 @@ func TestPodLifeTime(t *testing.T) {
},
pods: []*v1.Pod{p1, p2, p9},
nodes: []*v1.Node{node1},
- maxPodsToEvictPerNamespace: func(i uint) *uint { return &i }(1),
+ maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedEvictedPodCount: 1,
},
{
@@ -313,9 +322,207 @@ func TestPodLifeTime(t *testing.T) {
},
pods: []*v1.Pod{p1, p2, p9},
nodes: []*v1.Node{node1},
- maxPodsToEvictPerNode: func(i uint) *uint { return &i }(1),
+ maxPodsToEvictPerNode: utilptr.To[uint](1),
expectedEvictedPodCount: 1,
},
+ {
+ description: "1 pod with container status ImagePullBackOff should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"ImagePullBackOff"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
+ {
+ State: v1.ContainerState{
+ Waiting: &v1.ContainerStateWaiting{Reason: "ImagePullBackOff"},
+ },
+ },
+ }
+ },
+ },
+ {
+ description: "1 pod with container status CrashLoopBackOff should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"CrashLoopBackOff"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
+ {
+ State: v1.ContainerState{
+ Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
+ },
+ },
+ }
+ },
+ },
+ {
+ description: "1 pod with container status CreateContainerConfigError should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"CreateContainerConfigError"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
+ {
+ State: v1.ContainerState{
+ Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerConfigError"},
+ },
+ },
+ }
+ },
+ },
+ {
+ description: "1 pod with container status ErrImagePull should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"ErrImagePull"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
+ {
+ State: v1.ContainerState{
+ Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePull"},
+ },
+ },
+ }
+ },
+ },
+ {
+ description: "1 pod with container status CreateContainerError should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"CreateContainerError"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
+ {
+ State: v1.ContainerState{
+ Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
+ },
+ },
+ }
+ },
+ },
+ {
+ description: "1 pod with container status InvalidImageName should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"InvalidImageName"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
+ {
+ State: v1.ContainerState{
+ Waiting: &v1.ContainerStateWaiting{Reason: "InvalidImageName"},
+ },
+ },
+ }
+ },
+ },
+ {
+ description: "1 pod with pod status reason NodeLost should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"NodeLost"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.Reason = "NodeLost"
+ },
+ },
+ {
+ description: "1 pod with pod status reason NodeAffinity should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"NodeAffinity"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.Reason = "NodeAffinity"
+ },
+ },
+ {
+ description: "1 pod with pod status reason Shutdown should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"Shutdown"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.Reason = "Shutdown"
+ },
+ },
+ {
+ description: "1 pod with pod status reason UnexpectedAdmissionError should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"UnexpectedAdmissionError"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.Reason = "UnexpectedAdmissionError"
+ },
+ },
+ {
+ description: "1 pod with pod status phase v1.PodUnknown should be evicted",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{string(v1.PodUnknown)},
+ },
+ pods: []*v1.Pod{p16},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 1,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.Phase = v1.PodUnknown
+ },
+ },
+ {
+ description: "1 pod without ImagePullBackOff States should be ignored",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: &maxLifeTime,
+ States: []string{"ContainerCreating"},
+ },
+ pods: []*v1.Pod{p9},
+ nodes: []*v1.Node{node1},
+ expectedEvictedPodCount: 0,
+ applyPodsFunc: func(pods []*v1.Pod) {
+ pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
+ {
+ State: v1.ContainerState{
+ Waiting: &v1.ContainerStateWaiting{Reason: "ImagePullBackOff"},
+ },
+ },
+ }
+ },
+ },
}

for _, tc := range testCases {
@@ -323,6 +530,10 @@ func TestPodLifeTime(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

+ if tc.applyPodsFunc != nil {
+ tc.applyPodsFunc(tc.pods)
+ }
+
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)

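The utilptr.To changes above replace an immediately-invoked closure with the generic pointer helper from k8s.io/utils/ptr. A minimal sketch of the equivalence:

package main

import (
	"fmt"

	utilptr "k8s.io/utils/ptr"
)

func main() {
	// Before: a closure invoked on the spot, just to take the address of 1.
	p1 := func(i uint) *uint { return &i }(1)

	// After: the generic helper does the same in one call.
	p2 := utilptr.To[uint](1)

	fmt.Println(*p1 == *p2) // true
}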
@@ -43,17 +43,34 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
}
}
- podLifeTimeAllowedStates := sets.NewString(
+ podLifeTimeAllowedStates := sets.New(
// Pod Status Phase
string(v1.PodRunning),
string(v1.PodPending),
+ string(v1.PodUnknown),

// Pod Status Reasons
"NodeAffinity",
"NodeLost",
"Shutdown",
"UnexpectedAdmissionError",

- // Container Status Reasons
+ // Container state reasons: https://github.com/kubernetes/kubernetes/blob/release-1.24/pkg/kubelet/kubelet_pods.go#L76-L79
"PodInitializing",
"ContainerCreating",

+ // containerStatuses[*].state.waiting.reason: ImagePullBackOff, etc.
+ "ImagePullBackOff",
+ "CrashLoopBackOff",
+ "CreateContainerConfigError",
+ "ErrImagePull",
+ "CreateContainerError",
+ "InvalidImageName",
)

if !podLifeTimeAllowedStates.HasAll(args.States...) {
- return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.List())
+ return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.UnsortedList())
}

return nil

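The validation above requires every requested state to be in the allow-list, via HasAll, and the list now also covers container waiting reasons such as CrashLoopBackOff. A standalone sketch of the check on a trimmed allow-list:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// A trimmed version of podLifeTimeAllowedStates from the diff above.
	allowed := sets.New(
		"Running", "Pending", "Unknown",
		"CrashLoopBackOff", "ImagePullBackOff", "ErrImagePull",
	)

	// HasAll is what ValidatePodLifeTimeArgs uses: every requested state
	// must appear in the allow-list.
	fmt.Println(allowed.HasAll("CrashLoopBackOff", "Unknown"))  // true
	fmt.Println(allowed.HasAll("CrashLoopBackOff", "Sleeping")) // false
}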
@@ -36,6 +36,14 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
},
expectError: false,
},
+ {
+ description: "Pod Status Reasons CrashLoopBackOff ",
+ args: &PodLifeTimeArgs{
+ MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
+ States: []string{"CrashLoopBackOff"},
+ },
+ expectError: false,
+ },
{
description: "nil MaxPodLifeTimeSeconds arg, expects errors",
args: &PodLifeTimeArgs{

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -57,6 +57,10 @@ type podOwner struct {
imagesHash string
}

+ func (po podOwner) String() string {
+ return fmt.Sprintf("%s/%s/%s/%s", po.namespace, po.kind, po.name, po.imagesHash)
+ }
+
// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
removeDuplicatesArgs, ok := args.(*RemoveDuplicatesArgs)
@@ -64,10 +68,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemoveDuplicatesArgs, got %T", args)
}

- var includedNamespaces, excludedNamespaces sets.String
+ var includedNamespaces, excludedNamespaces sets.Set[string]
if removeDuplicatesArgs.Namespaces != nil {
- includedNamespaces = sets.NewString(removeDuplicatesArgs.Namespaces.Include...)
- excludedNamespaces = sets.NewString(removeDuplicatesArgs.Namespaces.Exclude...)
+ includedNamespaces = sets.New(removeDuplicatesArgs.Namespaces.Include...)
+ excludedNamespaces = sets.New(removeDuplicatesArgs.Namespaces.Exclude...)
}

// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -100,7 +104,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
nodeMap := make(map[string]*v1.Node)

for _, node := range nodes {
- klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
+ klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(node.Name, r.handle.GetPodsAssignedToNodeFunc(), r.podFilter)
if err != nil {
klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
@@ -206,7 +210,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
// It's assumed all duplicated pods are in the same priority class
// TODO(jchaloup): check if the pod has a different node to lend to
for _, pod := range pods[upperAvg-1:] {
- r.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
+ r.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
if r.handle.Evictor().NodeLimitExceeded(nodeMap[nodeName]) {
continue loop
}
@@ -234,7 +238,7 @@ func getTargetNodes(podNodes map[string][]*v1.Pod, nodes []*v1.Node) []*v1.Node
utils.NodeSelectorsEqual(getNodeAffinityNodeSelector(pod), getNodeAffinityNodeSelector(dp)) &&
reflect.DeepEqual(pod.Spec.NodeSelector, dp.Spec.NodeSelector) {
duplicated = true
- continue
+ break
}
}
if duplicated {
@@ -279,7 +283,7 @@ func hasExcludedOwnerRefKind(ownerRefs []metav1.OwnerReference, excludeOwnerKind
return false
}

- exclude := sets.NewString(excludeOwnerKinds...)
+ exclude := sets.New(excludeOwnerKinds...)
for _, owner := range ownerRefs {
if exclude.Has(owner.Kind) {
return true

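The new podOwner.String method above makes the type satisfy fmt.Stringer, so %v formatting and log output print a compact namespace/kind/name/hash key instead of a raw struct dump. A standalone sketch of the idiom, with illustrative field values:

package main

import "fmt"

// podOwner mirrors the struct in the diff above.
type podOwner struct {
	namespace, kind, name, imagesHash string
}

func (po podOwner) String() string {
	return fmt.Sprintf("%s/%s/%s/%s", po.namespace, po.kind, po.name, po.imagesHash)
}

func main() {
	po := podOwner{"default", "ReplicaSet", "web", "abc123"}
	// fmt (and klog) pick up the Stringer automatically.
	fmt.Println(po) // default/ReplicaSet/web/abc123
}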
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemoveFailedPodsArgs, got %T", args)
}

- var includedNamespaces, excludedNamespaces sets.String
+ var includedNamespaces, excludedNamespaces sets.Set[string]
if failedPodsArgs.Namespaces != nil {
- includedNamespaces = sets.NewString(failedPodsArgs.Namespaces.Include...)
- excludedNamespaces = sets.NewString(failedPodsArgs.Namespaces.Exclude...)
+ includedNamespaces = sets.New(failedPodsArgs.Namespaces.Include...)
+ excludedNamespaces = sets.New(failedPodsArgs.Namespaces.Exclude...)
}

// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -93,7 +93,7 @@ func (d *RemoveFailedPods) Name() string {
// Deschedule extension point implementation for the plugin
func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
for _, node := range nodes {
- klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
+ klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -103,7 +103,7 @@ func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *fr
}
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
- d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{})
+ d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{StrategyName: PluginName})
if d.handle.Evictor().NodeLimitExceeded(node) {
break
}
@@ -126,7 +126,7 @@ func validateCanEvict(pod *v1.Pod, failedPodArgs *RemoveFailedPodsArgs) error {
if len(failedPodArgs.ExcludeOwnerKinds) > 0 {
ownerRefList := podutil.OwnerRef(pod)
for _, owner := range ownerRefList {
- if sets.NewString(failedPodArgs.ExcludeOwnerKinds...).Has(owner.Kind) {
+ if sets.New(failedPodArgs.ExcludeOwnerKinds...).Has(owner.Kind) {
errs = append(errs, fmt.Errorf("pod's owner kind of %s is excluded", owner.Kind))
}
}
@@ -143,11 +143,22 @@ func validateCanEvict(pod *v1.Pod, failedPodArgs *RemoveFailedPodsArgs) error {
reasons = append(reasons, getFailedContainerStatusReasons(pod.Status.InitContainerStatuses)...)
}

- if !sets.NewString(failedPodArgs.Reasons...).HasAny(reasons...) {
+ if !sets.New(failedPodArgs.Reasons...).HasAny(reasons...) {
errs = append(errs, fmt.Errorf("pod does not match any of the reasons"))
}
}

+ if len(failedPodArgs.ExitCodes) > 0 {
+ exitCodes := getFailedContainerStatusExitCodes(pod.Status.ContainerStatuses)
+ if failedPodArgs.IncludingInitContainers {
+ exitCodes = append(exitCodes, getFailedContainerStatusExitCodes(pod.Status.InitContainerStatuses)...)
+ }
+
+ if !sets.New(failedPodArgs.ExitCodes...).HasAny(exitCodes...) {
+ errs = append(errs, fmt.Errorf("pod does not match any of the exitCodes"))
+ }
+ }
+
return utilerrors.NewAggregate(errs)
}

@@ -165,3 +176,15 @@ func getFailedContainerStatusReasons(containerStatuses []v1.ContainerStatus) []s

return reasons
}

+ func getFailedContainerStatusExitCodes(containerStatuses []v1.ContainerStatus) []int32 {
+ exitCodes := make([]int32, 0)
+
+ for _, containerStatus := range containerStatuses {
+ if containerStatus.State.Terminated != nil {
+ exitCodes = append(exitCodes, containerStatus.State.Terminated.ExitCode)
+ }
+ }
+
+ return exitCodes
+ }

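The new exitCodes gate above mirrors the existing reasons gate: collect the exit codes of terminated containers, then require at least one to match the configured list. A standalone sketch of the matching step:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Exit codes collected from a pod's terminated containers, as
	// getFailedContainerStatusExitCodes does in the diff above.
	observed := []int32{137, 1}

	// The configured RemoveFailedPodsArgs.ExitCodes.
	wanted := sets.New[int32](1, 2, 3)

	// HasAny is the check validateCanEvict uses: one match suffices.
	fmt.Println(wanted.HasAny(observed...)) // true
}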
@@ -41,12 +41,15 @@ var OneHourInSeconds uint = 3600
func TestRemoveFailedPods(t *testing.T) {
createRemoveFailedPodsArgs := func(
includingInitContainers bool,
- reasons, excludeKinds []string,
+ reasons []string,
+ exitCodes []int32,
+ excludeKinds []string,
minAgeSeconds *uint,
) RemoveFailedPodsArgs {
return RemoveFailedPodsArgs{
IncludingInitContainers: includingInitContainers,
Reasons: reasons,
+ ExitCodes: exitCodes,
MinPodLifetimeSeconds: minAgeSeconds,
ExcludeOwnerKinds: excludeKinds,
}
@@ -69,14 +72,14 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "0 failures, 0 evictions",
- args: createRemoveFailedPodsArgs(false, nil, nil, nil),
+ args: createRemoveFailedPodsArgs(false, nil, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{}, // no pods come back with field selector phase=Failed
},
{
description: "1 container terminated with reason NodeAffinity, 1 eviction",
- args: createRemoveFailedPodsArgs(false, nil, nil, nil),
+ args: createRemoveFailedPodsArgs(false, nil, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
@@ -85,9 +88,70 @@ func TestRemoveFailedPods(t *testing.T) {
}), nil),
},
},
+ {
+ description: "1 container terminated with exitCode 1, 1 eviction",
+ args: createRemoveFailedPodsArgs(false, nil, []int32{1}, nil, nil),
+ nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
+ expectedEvictedPodCount: 1,
+ pods: []*v1.Pod{
+ buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
+ }), nil),
+ },
+ },
+ {
+ description: "1 container terminated with exitCode 1, reason NodeAffinity, 1 eviction",
+ args: createRemoveFailedPodsArgs(false, nil, []int32{1}, nil, nil),
+ nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
+ expectedEvictedPodCount: 1,
+ pods: []*v1.Pod{
+ buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity", ExitCode: 1},
+ }), nil),
+ },
+ },
+ {
+ description: "1 container terminated with exitCode 1, expected exitCode 2, 0 eviction",
+ args: createRemoveFailedPodsArgs(false, nil, []int32{2}, nil, nil),
+ nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
+ expectedEvictedPodCount: 0,
+ pods: []*v1.Pod{
+ buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
+ }), nil),
+ },
+ },
+ {
+ description: "2 containers terminated with exitCode 1 and 2 respectively, 2 evictions",
+ args: createRemoveFailedPodsArgs(false, nil, []int32{1, 2, 3}, nil, nil),
+ nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
+ expectedEvictedPodCount: 2,
+ pods: []*v1.Pod{
+ buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
+ }), nil),
+ buildTestPod("p2", "node1", newPodStatus("", "", nil, &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{ExitCode: 2},
+ }), nil),
+ },
+ },
+ {
+ description: "2 containers terminated with exitCode 1 and 2 respectively, expected exitCode 1, 1 eviction",
+ args: createRemoveFailedPodsArgs(false, nil, []int32{1}, nil, nil),
+ nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
+ expectedEvictedPodCount: 1,
+ pods: []*v1.Pod{
+ buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
+ }), nil),
+ buildTestPod("p2", "node1", newPodStatus("", "", nil, &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{ExitCode: 2},
+ }), nil),
+ },
+ },
{
description: "1 init container terminated with reason NodeAffinity, 1 eviction",
- args: createRemoveFailedPodsArgs(true, nil, nil, nil),
+ args: createRemoveFailedPodsArgs(true, nil, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
@@ -96,9 +160,31 @@ func TestRemoveFailedPods(t *testing.T) {
}, nil), nil),
},
},
+ {
+ description: "1 init container terminated with exitCode 1, 1 eviction",
+ args: createRemoveFailedPodsArgs(true, nil, []int32{1}, nil, nil),
+ nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
+ expectedEvictedPodCount: 1,
+ pods: []*v1.Pod{
+ buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
+ }), nil),
+ },
+ },
+ {
+ description: "1 init container terminated with exitCode 2, expected 1, 0 eviction",
+ args: createRemoveFailedPodsArgs(true, nil, []int32{1}, nil, nil),
+ nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
+ expectedEvictedPodCount: 0,
+ pods: []*v1.Pod{
+ buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
+ Terminated: &v1.ContainerStateTerminated{ExitCode: 2},
+ }), nil),
+ },
+ },
{
description: "1 init container waiting with reason CreateContainerConfigError, 1 eviction",
- args: createRemoveFailedPodsArgs(true, nil, nil, nil),
+ args: createRemoveFailedPodsArgs(true, nil, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
@@ -109,7 +195,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "2 init container waiting with reason CreateContainerConfigError, 2 nodes, 2 evictions",
- args: createRemoveFailedPodsArgs(true, nil, nil, nil),
+ args: createRemoveFailedPodsArgs(true, nil, nil, nil, nil),
nodes: []*v1.Node{
test.BuildTestNode("node1", 2000, 3000, 10, nil),
test.BuildTestNode("node2", 2000, 3000, 10, nil),
@@ -126,7 +212,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "include reason=CreateContainerConfigError, 1 container terminated with reason CreateContainerConfigError, 1 eviction",
- args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil),
+ args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
@@ -137,7 +223,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "include reason=CreateContainerConfigError+NodeAffinity, 1 container terminated with reason CreateContainerConfigError, 1 eviction",
- args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError", "NodeAffinity"}, nil, nil),
+ args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError", "NodeAffinity"}, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
@@ -148,7 +234,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "include reason=CreateContainerConfigError, 1 container terminated with reason NodeAffinity, 0 eviction",
- args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil),
+ args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
@@ -159,7 +245,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "include init container=false, 1 init container waiting with reason CreateContainerConfigError, 0 eviction",
- args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil),
+ args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
@@ -170,7 +256,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "lifetime 1 hour, 1 container terminated with reason NodeAffinity, 0 eviction",
- args: createRemoveFailedPodsArgs(false, nil, nil, &OneHourInSeconds),
+ args: createRemoveFailedPodsArgs(false, nil, nil, nil, &OneHourInSeconds),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
@@ -181,7 +267,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "nodeFit=true, 1 unschedulable node, 1 container terminated with reason NodeAffinity, 0 eviction",
- args: createRemoveFailedPodsArgs(false, nil, nil, nil),
+ args: createRemoveFailedPodsArgs(false, nil, nil, nil, nil),
nodes: []*v1.Node{
test.BuildTestNode("node1", 2000, 3000, 10, nil),
test.BuildTestNode("node2", 2000, 2000, 10, func(node *v1.Node) {
@@ -198,7 +284,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "nodeFit=true, only available node does not have enough resources, 1 container terminated with reason CreateContainerConfigError, 0 eviction",
- args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil),
+ args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 1, 1, 10, nil), test.BuildTestNode("node2", 0, 0, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
@@ -210,7 +296,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "excluded owner kind=ReplicaSet, 1 init container terminated with owner kind=ReplicaSet, 0 eviction",
- args: createRemoveFailedPodsArgs(true, nil, []string{"ReplicaSet"}, nil),
+ args: createRemoveFailedPodsArgs(true, nil, nil, []string{"ReplicaSet"}, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
@@ -221,7 +307,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "excluded owner kind=DaemonSet, 1 init container terminated with owner kind=ReplicaSet, 1 eviction",
- args: createRemoveFailedPodsArgs(true, nil, []string{"DaemonSet"}, nil),
+ args: createRemoveFailedPodsArgs(true, nil, nil, []string{"DaemonSet"}, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
@@ -232,7 +318,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "excluded owner kind=DaemonSet, 1 init container terminated with owner kind=ReplicaSet, 1 pod in termination; nothing should be moved",
- args: createRemoveFailedPodsArgs(true, nil, []string{"DaemonSet"}, nil),
+ args: createRemoveFailedPodsArgs(true, nil, nil, []string{"DaemonSet"}, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
@@ -243,7 +329,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "1 container terminated with reason ShutDown, 0 evictions",
- args: createRemoveFailedPodsArgs(false, nil, nil, nil),
+ args: createRemoveFailedPodsArgs(false, nil, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
@@ -253,7 +339,7 @@ func TestRemoveFailedPods(t *testing.T) {
},
{
description: "include reason=Shutdown, 2 containers terminated with reason ShutDown, 2 evictions",
- args: createRemoveFailedPodsArgs(false, []string{"Shutdown"}, nil, nil),
+ args: createRemoveFailedPodsArgs(false, []string{"Shutdown"}, nil, nil, nil),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 2,
pods: []*v1.Pod{
@@ -322,6 +408,7 @@ func TestRemoveFailedPods(t *testing.T) {

plugin, err := New(&RemoveFailedPodsArgs{
Reasons: tc.args.Reasons,
+ ExitCodes: tc.args.ExitCodes,
MinPodLifetimeSeconds: tc.args.MinPodLifetimeSeconds,
IncludingInitContainers: tc.args.IncludingInitContainers,
ExcludeOwnerKinds: tc.args.ExcludeOwnerKinds,

@@ -30,5 +30,6 @@ type RemoveFailedPodsArgs struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds"`
MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds"`
Reasons []string `json:"reasons"`
+ ExitCodes []int32 `json:"exitCodes"`
IncludingInitContainers bool `json:"includingInitContainers"`
}

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -56,6 +56,11 @@ func (in *RemoveFailedPodsArgs) DeepCopyInto(out *RemoveFailedPodsArgs) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.ExitCodes != nil {
+ in, out := &in.ExitCodes, &out.ExitCodes
+ *out = make([]int32, len(*in))
+ copy(*out, *in)
+ }
return
}

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
- Copyright 2023 The Kubernetes Authors.
+ Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -37,4 +37,7 @@ func SetDefaults_RemovePodsHavingTooManyRestartsArgs(obj runtime.Object) {
if !args.IncludingInitContainers {
args.IncludingInitContainers = false
}
+ if args.States == nil {
+ args.States = nil
+ }
}

@@ -17,6 +17,7 @@ import (
"testing"

"github.com/google/go-cmp/cmp"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -37,6 +38,7 @@ func TestSetDefaults_RemovePodsHavingTooManyRestartsArgs(t *testing.T) {
LabelSelector: nil,
PodRestartThreshold: 0,
IncludingInitContainers: false,
+ States: nil,
},
},
{
@@ -46,12 +48,14 @@ func TestSetDefaults_RemovePodsHavingTooManyRestartsArgs(t *testing.T) {
LabelSelector: &metav1.LabelSelector{},
PodRestartThreshold: 10,
IncludingInitContainers: true,
+ States: []string{string(v1.PodRunning)},
},
want: &RemovePodsHavingTooManyRestartsArgs{
Namespaces: &api.Namespaces{},
LabelSelector: &metav1.LabelSelector{},
PodRestartThreshold: 10,
IncludingInitContainers: true,
+ States: []string{string(v1.PodRunning)},
},
},
}

@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
}

- var includedNamespaces, excludedNamespaces sets.String
+ var includedNamespaces, excludedNamespaces sets.Set[string]
if tooManyRestartsArgs.Namespaces != nil {
- includedNamespaces = sets.NewString(tooManyRestartsArgs.Namespaces.Include...)
- excludedNamespaces = sets.NewString(tooManyRestartsArgs.Namespaces.Exclude...)
+ includedNamespaces = sets.New(tooManyRestartsArgs.Namespaces.Include...)
+ excludedNamespaces = sets.New(tooManyRestartsArgs.Namespaces.Exclude...)
}

// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -75,6 +75,29 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return true
})

+ if len(tooManyRestartsArgs.States) > 0 {
+ states := sets.New(tooManyRestartsArgs.States...)
+ podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
+ if states.Has(string(pod.Status.Phase)) {
+ return true
+ }
+
+ containerStatuses := pod.Status.ContainerStatuses
+
+ if tooManyRestartsArgs.IncludingInitContainers {
+ containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...)
+ }
+
+ for _, containerStatus := range containerStatuses {
+ if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
+ return true
+ }
+ }
+
+ return false
+ })
+ }
+
return &RemovePodsHavingTooManyRestarts{
handle: handle,
args: tooManyRestartsArgs,
@@ -90,7 +113,7 @@ func (d *RemovePodsHavingTooManyRestarts) Name() string {
// Deschedule extension point implementation for the plugin
func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
for _, node := range nodes {
- klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
+ klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -100,7 +123,7 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
}
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
- d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{})
+ d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{StrategyName: PluginName})
if d.handle.Evictor().NodeLimitExceeded(node) {
break
}

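The new States gate above is ANDed onto the existing restart-count filter through podutil.WrapFilterFuncs. A standalone sketch of that composition pattern, with the podutil helper reduced to a local function:

package main

import "fmt"

type pod struct {
	phase    string
	restarts int
}

// wrapFilterFuncs is a local stand-in for podutil.WrapFilterFuncs:
// a pod passes only if it passes every wrapped filter.
func wrapFilterFuncs(filters ...func(pod) bool) func(pod) bool {
	return func(p pod) bool {
		for _, f := range filters {
			if !f(p) {
				return false
			}
		}
		return true
	}
}

func main() {
	tooManyRestarts := func(p pod) bool { return p.restarts > 3 }
	inState := func(p pod) bool { return p.phase == "CrashLoopBackOff" }

	filter := wrapFilterFuncs(tooManyRestarts, inState)

	fmt.Println(filter(pod{"CrashLoopBackOff", 5})) // true
	fmt.Println(filter(pod{"Running", 5}))          // false
}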
Some files were not shown because too many files have changed in this diff.