Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 21:31:18 +01:00)

Compare commits: deschedule ... v0.28.1 (111 commits)
SHA1:
90e7accd57, 708b734ad6, ddd7cd1c13, 0fcedb6e96, 6eb44a1313, 308309d6d2, c8d4658277, 6f0cd50dc0,
89f453ebb3, 783304f248, caafe96ce7, 2a05825b38, 2fee121df3, c3424337c8, e217952707, f4977d1518,
96c1c299eb, ccff5fe155, 289c003f87, c4ab2008e4, 19aa8a234c, bf2bd73f64, 704a82bcf4, a01322a5e5,
30aec81951, d8159c571d, c59a0f0951, 999dd3dc25, a243681ff8, 73eb42467a, b8eec7a459, a7d8e69820,
3bd9dfc625, dca2e58b8e, 267efb3a4b, c0d60196a1, 2e1008e290, 714a347692, af4b32c6ba, c602d1256f,
a592cbc417, c557d187ab, 8af1c2b157, e6773e77fd, e8427d0eb4, c0f9761a61, e1dc63bcc7, 8a458e0dcb,
99246cd254, 33e9a52385, b833607504, bae120929e, 30dd78dcee, 1d4dc57ad1, 3f0c06b58d, bb4721049f,
78c9ae851c, 9660a7b469, 81aa897c48, f4c64c2c75, ca5781827a, 31704047c5, 1be0ab2bd1, f8442fbb0d,
f7b7f50b92, 27a436b98a, 8f7dea10c1, 68f43fe591, a0cbfcfbca, 2a91eda30d, 06dc26a83e, add9d6e897,
d9b763a28b, 3ff38bab59, ed1554dd19, 931aac9c71, a497541f39, 333b5cfbb6, eb2372137e, 7f2f6f2b16,
5f0edb5f93, f5a7f716b3, 5163ac3ace, 4c272e6ea2, c637b7f0e6, 5462a599af, 7dcd7fa50f, 6271d51125,
0bdbf51eb2, 9aad51f328, 699297711a, 1b976529bc, 877d9b18ee, b58ec3b458, e94a6f129a, 1b22e102ce,
a35e3f49b4, 9313041d77, 83c81e6b58, eb356fe6ff, a62bb54a3a, 92017d0fdd, 476bee3424, b630ecaaf7,
359b38a34c, 2c525afc76, e2826fa66c, 0f5a5a2235, 1f6a736aab, 8cbbe5501b, dd63aac88c
.gitattributes (vendored, new file, +6)
@@ -0,0 +1,6 @@
# Always check-out / check-in files with LF line endings.
* text=auto eol=lf

go.sum merge=union
**/zz_generated.*.go linguist-generated=true
vendor/** linguist-vendored

.github/workflows/helm.yaml (vendored, 4 lines changed)
@@ -35,7 +35,7 @@ jobs:

- uses: actions/setup-go@v3
with:
go-version: '1.20.3'
go-version: '1.20.7'

- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
@@ -47,7 +47,7 @@ jobs:
run: |
changed=$(ct list-changed --config=.github/ci/ct.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
echo "changed=true" >> $GITHUB_OUTPUT
fi

- name: Run chart-testing (lint)

.github/workflows/manifests.yaml (vendored, 4 lines changed)
@@ -7,8 +7,8 @@ jobs:
deploy:
strategy:
matrix:
k8s-version: ["v1.26.0"]
descheduler-version: ["v0.26.1"]
k8s-version: ["v1.28.0"]
descheduler-version: ["v0.28.1"]
descheduler-api: ["v1alpha1", "v1alpha2"]
manifest: ["deployment"]
runs-on: ubuntu-latest

.github/workflows/release.yaml (vendored, 2 lines changed)
@@ -22,7 +22,7 @@ jobs:
- name: Install Helm
uses: azure/setup-helm@v1
with:
version: v3.4.0
version: v3.7.0

- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.1.0

@@ -1,5 +1,5 @@
run:
deadline: 2m
timeout: 2m

linters:
disable-all: true
@@ -15,4 +15,4 @@ linters-settings:
gofumpt:
extra-rules: true
goimports:
local-prefixes: sigs.k8s.io/descheduler
local-prefixes: sigs.k8s.io/descheduler

@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.3
FROM golang:1.20.7

WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .

Makefile (7 lines changed)
@@ -43,6 +43,10 @@ IMAGE:=descheduler:$(VERSION)
# IMAGE_GCLOUD is the image name of descheduler in the remote registry
IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)

# CURRENT_DIR is the current dir where the Makefile exists
CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))

# TODO: upload binaries to GCS bucket
#
# In the future binaries can be uploaded to
@@ -129,6 +133,9 @@ gen:
./hack/update-generated-defaulters.sh
./hack/update-docs.sh

gen-docker:
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:1.20.7 gen

verify-gen:
./hack/verify-conversions.sh
./hack/verify-deep-copies.sh

OWNERS (1 line changed)
@@ -12,6 +12,7 @@ reviewers:
- a7i
- janeliul
- knelasevero
- jklaw90
emeritus_approvers:
- aveshagarwal
- k82cn

README.md (64 lines changed)
@@ -38,6 +38,7 @@ that version's release branch, as listed below:

|Descheduler Version|Docs link|
|---|---|
|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|
|v0.27.x|[`release-1.27`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.27/README.md)|
|v0.26.x|[`release-1.26`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.26/README.md)|
|v0.25.x|[`release-1.25`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.25/README.md)|
@@ -112,7 +113,7 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.

## Policy, Default Evictor and Strategy plugins

**⚠️ v1alpha1 configuration is still suported, but deprecated (and soon will be removed). Please consider migrating to v1alpha2 (described bellow). For previous v1alpha1 documentation go to [docs/deprecated/v1alpha1.md](docs/deprecated/v1alpha1.md) ⚠️**
**⚠️ v1alpha1 configuration is still supported, but deprecated (and soon will be removed). Please consider migrating to v1alpha2 (described bellow). For previous v1alpha1 documentation go to [docs/deprecated/v1alpha1.md](docs/deprecated/v1alpha1.md) ⚠️**

The Descheduler Policy is configurable and includes default strategy plugins that can be enabled or disabled. It includes a common eviction configuration at the top level, as well as configuration from the Evictor plugin (Default Evictor, if not specified otherwise). Top-level configuration and Evictor plugin configuration are applied to all evictions.

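For orientation, a minimal v1alpha2 policy has the shape sketched below. This is only a sketch assembled from the example policies added later in this diff (for example `examples/too-many-restarts.yml`); the profile name and the chosen plugin are placeholders, not defaults.

```yaml
# Minimal v1alpha2 policy sketch; profile name and plugin choice are placeholders.
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "RemovePodsHavingTooManyRestarts"
        args:
          podRestartThreshold: 100
    plugins:
      deschedule:
        enabled:
          - "RemovePodsHavingTooManyRestarts"
```
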
@@ -201,6 +202,7 @@ Balance Plugins: These plugins process all pods, or groups of pods, and determin
| [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity) |Deschedule|Evicts pods violating node affinity|
| [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints) |Deschedule|Evicts pods violating node taints|
| [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint) |Balance|Evicts pods violating TopologySpreadConstraints|
| [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts) |Deschedule|Evicts pods having too many restarts|
| [PodLifeTime](#podlifetime) |Deschedule|Evicts pods that have exceeded a specified age limit|
| [RemoveFailedPods](#removefailedpods) |Deschedule|Evicts pods with certain failed reasons|

@@ -379,10 +381,9 @@ profiles:
"memory": 20
"pods": 20
evictableNamespaces:
namespaces:
exclude:
- "kube-system"
- "namespace1"
exclude:
- "kube-system"
- "namespace1"
plugins:
balance:
enabled:
@@ -435,7 +436,10 @@ profiles:
This strategy makes sure all pods violating
[node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
are eventually removed from nodes. Node affinity rules allow a pod to specify
`requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
`requiredDuringSchedulingIgnoredDuringExecution` and/or
`preferredDuringSchedulingIgnoredDuringExecution`.

The `requiredDuringSchedulingIgnoredDuringExecution` type tells the scheduler
to respect node affinity when scheduling the pod but kubelet to ignore
in case node changes over time and no longer respects the affinity.
When enabled, the strategy serves as a temporary implementation
@@ -448,6 +452,14 @@ of scheduling. Over time nodeA stops to satisfy the rule. When the strategy gets
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.

The `preferredDuringSchedulingIgnoredDuringExecution` type tells the scheduler
to respect node affinity when scheduling if that's possible. If not, the pod
gets scheduled anyway. It may happen that, over time, the state of the cluster
changes and now the pod can be scheduled on a node that actually fits its
preferred node affinity. When enabled, the strategy serves as a temporary
implementation of `preferredDuringSchedulingPreferredDuringExecution`, so the
pod will be evicted if it can be scheduled on a "better" node.

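A minimal sketch of enabling this plugin under v1alpha2 follows. The hunk above does not show the plugin's argument table values, so the `nodeAffinityType` list argument is an assumption here; treat the whole block as illustrative, not as the documented configuration.

```yaml
# Sketch only: assumes the plugin accepts a nodeAffinityType list argument.
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "RemovePodsViolatingNodeAffinity"
        args:
          nodeAffinityType:
            - "requiredDuringSchedulingIgnoredDuringExecution"
            - "preferredDuringSchedulingIgnoredDuringExecution"
    plugins:
      deschedule:
        enabled:
          - "RemovePodsViolatingNodeAffinity"
```
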
**Parameters:**

|Name|Type|
@@ -521,18 +533,41 @@ This strategy makes sure that pods violating [topology spread constraints](https
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.

By default, this strategy only deals with hard constraints, setting parameter `includeSoftConstraints` to `true` will
include soft constraints.
By default, this strategy only includes hard constraints, you can explicitly set `constraints` as shown below to include both:
```yaml
constraints:
- DoNotSchedule
- ScheduleAnyway
```

The `topologyBalanceNodeFit` arg is used when balancing topology domains while the Default Evictor's `nodeFit` is used in pre-eviction to determine if a pod can be evicted.
```yaml
topologyBalanceNodeFit: false
```

Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.

[Supported Constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/#spread-constraint-definition) fields:

|Name|Supported?|
|----|----------|
|`maxSkew`|Yes|
|`minDomains`|No|
|`topologyKey`|Yes|
|`whenUnsatisfiable`|Yes|
|`labelSelector`|Yes|
|`matchLabelKeys`|Yes|
|`nodeAffinityPolicy`|Yes|
|`nodeTaintsPolicy`|Yes|

**Parameters:**

|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`constraints`|(see [whenUnsatisfiable](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core))||
|`topologyBalanceNodeFit`|bool|default `true`. [node fit filtering](#node-fit-filtering) when balancing topology domains|

**Example:**

@@ -544,7 +579,8 @@ profiles:
pluginConfig:
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
includeSoftConstraints: false
constraints:
- DoNotSchedule
plugins:
balance:
enabled:
@@ -560,6 +596,13 @@ include `podRestartThreshold`, which is the number of restarts (summed over all
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.

You can also specify `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) of: `CrashLoopBackOff`

If a value for `states` or `podStatusPhases` is not specified,
Pods in any state (even `Running`) are considered for eviction.

**Parameters:**

|Name|Type|
@@ -568,6 +611,7 @@ into that calculation.
|`includingInitContainers`|bool|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`states`|list(string)|Only supported in v0.28+|

**Example:**

@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.27.0
appVersion: 0.27.0
version: 0.28.1
appVersion: 0.28.1
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes

@@ -55,6 +55,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `timeZone` | configure `timeZone` for CronJob | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
@@ -75,6 +76,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |

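A sketch of a values snippet exercising the two new chart parameters documented above; `Etc/UTC` and the `prometheus` label mirror the commented hints in the chart's values.yaml and are examples, not defaults.

```yaml
# Sketch only: example values for the new timeZone and serviceMonitor.additionalLabels parameters.
timeZone: "Etc/UTC"
serviceMonitor:
  enabled: true
  additionalLabels:
    prometheus: kube-prometheus-stack
```
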
@@ -21,6 +21,9 @@ spec:
{{- if .Values.failedJobsHistoryLimit }}
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
{{- end }}
{{- if .Values.timeZone }}
timeZone: {{ .Values.timeZone }}
{{- end }}
jobTemplate:
spec:
{{- if .Values.ttlSecondsAfterFinished }}
@@ -48,6 +51,10 @@ spec:
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
@@ -66,15 +73,11 @@ spec:
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/bin/descheduler"
{{- toYaml .Values.command | nindent 16 }}
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- --policy-config-file=/policy-dir/policy.yaml
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}

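The new `dnsConfig` block above passes `.Values.dnsConfig` straight into the pod spec. A hedged sketch of a values entry that this template could render (the `ndots` option is just an example value, not something shipped with the chart):

```yaml
# Sketch: example pod DNS settings forwarded via the new dnsConfig value.
dnsConfig:
  options:
    - name: ndots
      value: "2"
```
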
@@ -31,30 +31,29 @@ spec:
{{- .Values.podAnnotations | toYaml | nindent 8 }}
{{- end }}
spec:
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- toYaml . | nindent 6 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/bin/descheduler"
{{- toYaml .Values.command | nindent 12 }}
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
{{- end }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
ports:

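The template change above collapses each `cmdOptions` entry into a single `--key=value` argument (previously the key and value were emitted as two separate quoted args). A sketch of the mapping, using the `v: 3` entry and the `deschedulingInterval: 5m` default from the chart's values; the rendered output is illustrative, not copied from a real `helm template` run:

```yaml
# values.yaml input (from the chart defaults):
cmdOptions:
  v: 3

# sketch of the container args rendered by the new single-argument form:
args:
  - --policy-config-file=/policy-dir/policy.yaml
  - --descheduling-interval=5m   # from deschedulingInterval when running as a Deployment
  - --v=3
```
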
@@ -7,6 +7,9 @@ metadata:
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
jobLabel: jobLabel
namespaceSelector:

@@ -45,6 +45,7 @@ suspend: false
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished 600
# timeZone: Etc/UTC

# Required when running as a Deployment
deschedulingInterval: 5m
@@ -67,6 +68,9 @@ leaderElection: {}
# resourceName: "descheduler"
# resourceNamescape: "kube-system"

command:
- "/bin/descheduler"

cmdOptions:
v: 3

@@ -79,6 +83,13 @@ deschedulerPolicy:
# maxNoOfPodsToEvictPerNamespace: 10
# ignorePvcPods: true
# evictLocalStoragePods: true
# tracing:
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
# transportCert: ""
# serviceName: ""
# serviceNamespace: ""
# sampleRate: 1.0
# fallbackToNoOpProviderOnError: true
strategies:
RemoveDuplicates:
enabled: true
@@ -161,6 +172,8 @@ podAnnotations: {}

podLabels: {}

dnsConfig: {}

livenessProbe:
failureThreshold: 3
httpGet:
@@ -177,6 +190,9 @@ serviceMonitor:
enabled: false
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
# Add custom labels to the ServiceMonitor resource
additionalLabels: {}
# prometheus: kube-prometheus-stack
interval: ""
# honorLabels: true
insecureSkipVerify: true

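If the commented `tracing` block above is enabled, a minimal values sketch could look like the following. The endpoint and sample rate are taken from the commented defaults; how the chart ultimately wires these values into the descheduler's OTEL flags is not shown in this diff, so treat the block as illustrative only.

```yaml
# Sketch: tracing block uncommented with the values suggested in the comments above.
deschedulerPolicy:
  tracing:
    collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
    sampleRate: 1.0
    fallbackToNoOpProviderOnError: true
```
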
@@ -29,6 +29,7 @@ import (
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
"sigs.k8s.io/descheduler/pkg/tracing"
)

const (
@@ -43,6 +44,7 @@ type DeschedulerServer struct {
EventClient clientset.Interface
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
DisableMetrics bool
EnableHTTP2 bool
}

// NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -74,7 +76,9 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
},
}
deschedulerscheme.Scheme.Default(&versionedCfg)
cfg := componentconfig.DeschedulerConfiguration{}
cfg := componentconfig.DeschedulerConfiguration{
Tracing: componentconfig.TracingConfiguration{},
}
if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
return nil, err
}
@@ -83,7 +87,6 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err

// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "kubeconfig", rs.ClientConnection.Kubeconfig, "File with kube configuration. Deprecated, use client-connection-kubeconfig instead.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "client-connection-kubeconfig", rs.ClientConnection.Kubeconfig, "File path to kube configuration for interacting with kubernetes apiserver.")
@@ -92,6 +95,13 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "Execute descheduler in dry run mode.")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
fs.StringVar(&rs.Tracing.CollectorEndpoint, "otel-collector-endpoint", "", "Set this flag to the OpenTelemetry Collector Service Address")
fs.StringVar(&rs.Tracing.TransportCert, "otel-transport-ca-cert", "", "Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode")
fs.StringVar(&rs.Tracing.ServiceName, "otel-service-name", tracing.DefaultServiceName, "OTEL Trace name to be used with the resources")
fs.StringVar(&rs.Tracing.ServiceNamespace, "otel-trace-namespace", "", "OTEL Trace namespace to be used with the resources")
fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")

componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)

@@ -20,7 +20,6 @@ package app
import (
"context"
"io"
"os"
"os/signal"
"syscall"

@@ -28,14 +27,18 @@ import (

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/tracing"

"github.com/spf13/cobra"

"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
apiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
restclient "k8s.io/client-go/rest"
registry "k8s.io/component-base/logs/api/v1"
jsonLog "k8s.io/component-base/logs/json"
"k8s.io/component-base/featuregate"
"k8s.io/component-base/logs"
logsapi "k8s.io/component-base/logs/api/v1"
_ "k8s.io/component-base/logs/json/register"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
@@ -48,36 +51,31 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
klog.ErrorS(err, "unable to initialize server")
}

featureGate := featuregate.NewFeatureGate()
logConfig := logsapi.NewLoggingConfiguration()

cmd := &cobra.Command{
Use: "descheduler",
Short: "descheduler",
Long: `The descheduler evicts pods which may be bound to less desired nodes`,
Run: func(cmd *cobra.Command, args []string) {
// s.Logs.Config.Format = s.Logging.Format

// LoopbackClientConfig is a config for a privileged loopback connection
var LoopbackClientConfig *restclient.Config
var SecureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
Long: "The descheduler evicts pods which may be bound to less desired nodes",
PreRunE: func(cmd *cobra.Command, args []string) error {
logs.InitLogs()
if logsapi.ValidateAndApply(logConfig, featureGate); err != nil {
return err
}
descheduler.SetupPlugins()
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
// loopbackClientConfig is a config for a privileged loopback connection
var loopbackClientConfig *restclient.Config
var secureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return
return err
}

var factory registry.LogFormatFactory
if s.Logging.Format == "json" {
factory = jsonLog.Factory{}
}

if factory == nil {
klog.ClearLogger()
} else {
log, loggerControl := factory.Create(registry.LoggingConfiguration{
Format: s.Logging.Format,
Verbosity: s.Logging.Verbosity,
}, registry.LoggingOptions{})
defer loggerControl.Flush()
klog.SetLogger(log)
}
secureServing.DisableHTTP2 = !s.EnableHTTP2

ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)

@@ -88,33 +86,42 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {

healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))

stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
stoppedCh, _, err := secureServing.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return
return err
}

err = Run(ctx, s)
if err != nil {
if err = Run(ctx, s); err != nil {
klog.ErrorS(err, "descheduler server")
return err
}

done()
// wait for metrics server to close
<-stoppedCh

return nil
},
}
cmd.SetOut(out)
flags := cmd.Flags()
s.AddFlags(flags)

runtime.Must(logsapi.AddFeatureGates(featureGate))
logsapi.AddFlags(logConfig, flags)

return cmd
}

func Run(ctx context.Context, rs *options.DeschedulerServer) error {
err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
if err != nil {
return err
}
defer tracing.Shutdown(ctx)
// increase the fake watch channel so the dry-run mode can be run
// over a cluster with thousands of pods
watch.DefaultChanSize = 100000
return descheduler.Run(ctx, rs)
}

func SetupLogs() {
klog.SetOutput(os.Stdout)
klog.InitFlags(nil)
}

@@ -21,14 +21,8 @@ import (

"k8s.io/component-base/cli"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
"sigs.k8s.io/descheduler/pkg/descheduler"
)

func init() {
app.SetupLogs()
descheduler.SetupPlugins()
}

func main() {
out := os.Stdout
cmd := app.NewDeschedulerCommand(out)

@@ -13,7 +13,7 @@ descheduler [flags]

### Options

```
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces and IP address families will be used. (default 0.0.0.0)
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
--client-connection-burst int32 Burst to use for interacting with kubernetes apiserver.
--client-connection-kubeconfig string File path to kube configuration for interacting with kubernetes apiserver.
@@ -21,6 +21,7 @@ descheduler [flags]
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run Execute descheduler in dry run mode.
--enable-http2 If http/2 should be enabled for the metrics and health check
-h, --help help for descheduler
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
@@ -31,7 +32,16 @@ descheduler [flags]
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
--logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--log-json-info-buffer-size quantity [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-json-split-stream [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--logging-format string Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text")
--otel-collector-endpoint string Set this flag to the OpenTelemetry Collector Service Address
--otel-fallback-no-op-on-error Fallback to NoOp Tracer in case of error
--otel-sample-rate float Sample rate to collect the Traces (default 1)
--otel-service-name string OTEL Trace name to be used with the resources (default "descheduler")
--otel-trace-namespace string OTEL Trace namespace to be used with the resources
--otel-transport-ca-cert string Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
--policy-config-file string File with descheduler policy configuration.
@@ -43,6 +53,8 @@ descheduler [flags]
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
-v, --v Level number for the log level verbosity
--vmodule pattern=N,... comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)
```

### SEE ALSO

@@ -109,17 +109,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku

Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.27.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.28.1' | kubectl apply -f -
```

Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.27.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.28.1' | kubectl apply -f -
```

Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.27.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.28.1' | kubectl apply -f -
```

## User Guide

@@ -4,6 +4,9 @@ Starting with descheduler release v0.10.0 container images are available in the

Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
v0.27.1 | registry.k8s.io/descheduler/descheduler:v0.27.1 | AMD64<br>ARM64<br>ARMv7 |
v0.27.0 | registry.k8s.io/descheduler/descheduler:v0.27.0 | AMD64<br>ARM64<br>ARMv7 |
v0.26.1 | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
v0.26.0 | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |

@@ -21,7 +21,9 @@ profiles:
includingInitContainers: true
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
includeSoftConstraints: true
constraints:
- DoNotSchedule
- ScheduleAnyway
plugins:
deschedule:
enabled:

examples/too-many-restarts.yml (new file, +15)
@@ -0,0 +1,15 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemovePodsHavingTooManyRestarts"
args:
podRestartThreshold: 100
includingInitContainers: true
states:
- CrashLoopBackOff
plugins:
deschedule:
enabled:
- "RemovePodsHavingTooManyRestarts"

@@ -5,7 +5,9 @@ profiles:
pluginConfig:
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints
constraints:
- DoNotSchedule
- ScheduleAnyway
plugins:
balance:
enabled:

go.mod (131 lines changed)
@@ -4,111 +4,114 @@ go 1.20

require (
github.com/client9/misspell v0.3.4
github.com/google/go-cmp v0.5.9
github.com/spf13/cobra v1.6.0
github.com/google/go-cmp v0.6.0
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
k8s.io/api v0.27.0
k8s.io/apimachinery v0.27.0
k8s.io/apiserver v0.27.0
k8s.io/client-go v0.27.0
k8s.io/code-generator v0.27.0
k8s.io/component-base v0.27.0
k8s.io/component-helpers v0.27.0
k8s.io/klog/v2 v2.90.1
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
google.golang.org/grpc v1.59.0
k8s.io/api v0.28.4
k8s.io/apimachinery v0.28.4
k8s.io/apiserver v0.28.4
k8s.io/client-go v0.28.4
k8s.io/code-generator v0.28.4
k8s.io/component-base v0.28.4
k8s.io/component-helpers v0.28.4
k8s.io/klog/v2 v2.110.1
k8s.io/utils v0.0.0-20231121161247-cf03d44ff3cf
sigs.k8s.io/mdtoc v1.1.0
)

require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.4.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
github.com/google/cel-go v0.12.6 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/google/cel-go v0.16.1 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
go.opentelemetry.io/otel/metric v0.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
go.opentelemetry.io/otel/trace v1.10.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.7.0 // indirect
golang.org/x/crypto v0.15.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.14.0 // indirect
golang.org/x/term v0.14.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.8.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
k8s.io/kms v0.27.0 // indirect
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 // indirect
k8s.io/kms v0.28.4 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1

730
go.sum
730
go.sum
@@ -1,758 +1,326 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
|
||||
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M=
github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo=
github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI=
github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c=
go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4=
go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0=
go.opentelemetry.io/otel/metric v0.31.0 h1:6SiklT+gfWAwWUR0meEMxQBtihpiEs4c+vL9spDTqUs=
go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A=
go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY=
go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE=
go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E=
go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo=
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ=
go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI=
go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
|
||||
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
|
||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
|
||||
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
|
||||
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
|
||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.27.0 h1:2owttiA8Oa+J3idFeq8TSnNpm4y6AOGPI3PDbIpp2cE=
|
||||
k8s.io/api v0.27.0/go.mod h1:Wl+QRvQlh+T8SK5f4F6YBhhyH6hrFO08nl74xZb1MUE=
|
||||
k8s.io/apimachinery v0.27.0 h1:vEyy/PVMbPMCPutrssCVHCf0JNZ0Px+YqPi82K2ALlk=
|
||||
k8s.io/apimachinery v0.27.0/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM=
|
||||
k8s.io/apiserver v0.27.0 h1:sXt/2yVMebZef6GqJHs4IYHSdSYwwrJCafBV/KSCwDw=
|
||||
k8s.io/apiserver v0.27.0/go.mod h1:8heEJ5f6EqiKwXC3Ez3ikgOvGtRSEQG/SQZkhO9UzIg=
|
||||
k8s.io/client-go v0.27.0 h1:DyZS1fJkv73tEy7rWv4VF6NwGeJ7SKvNaLRXZBYLA+4=
|
||||
k8s.io/client-go v0.27.0/go.mod h1:XVEmpNnM+4JYO3EENoFV/ZDv3KxKVJUnzGo70avk+C4=
|
||||
k8s.io/code-generator v0.27.0 h1:XQsGgUKnxuaPNr5V+Bg/AUmbvfhVu1jfxTW8PpQyGs0=
|
||||
k8s.io/code-generator v0.27.0/go.mod h1:iWtpm0ZMG6Gc4daWfITDSIu+WFhFJArYDhj242zcbnY=
|
||||
k8s.io/component-base v0.27.0 h1:g3/FkscH8Uqg9SiDCEfhfhTVwKiVo4T2+iBwUqiFkMg=
|
||||
k8s.io/component-base v0.27.0/go.mod h1:PXyBQd/vYYjqqGB83rnsHffTTG6zlmxZAd0ZSOu6evk=
|
||||
k8s.io/component-helpers v0.27.0 h1:rymQGJc4s30hHeb5VGuPdht8gKIPecj+Bw2FOJSavE4=
|
||||
k8s.io/component-helpers v0.27.0/go.mod h1:vMjVwym/Y0BVyNvg8a4Et2vyPJAh/JhBM0OTRAt0Ceg=
|
||||
k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
|
||||
k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0=
|
||||
k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8=
|
||||
k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
|
||||
k8s.io/apiserver v0.28.4 h1:BJXlaQbAU/RXYX2lRz+E1oPe3G3TKlozMMCZWu5GMgg=
|
||||
k8s.io/apiserver v0.28.4/go.mod h1:Idq71oXugKZoVGUUL2wgBCTHbUR+FYTWa4rq9j4n23w=
|
||||
k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY=
|
||||
k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4=
|
||||
k8s.io/code-generator v0.28.4 h1:tcOSNIZQvuAvXhOwpbuJkKbAABJQeyCcQBCN/3uI18c=
|
||||
k8s.io/code-generator v0.28.4/go.mod h1:OQAfl6bZikQ/tK6faJ18Vyzo54rUII2NmjurHyiN1g4=
|
||||
k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo=
|
||||
k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU=
|
||||
k8s.io/component-helpers v0.28.4 h1:+X9VXT5+jUsRdC26JyMZ8Fjfln7mSjgumafocE509C4=
|
||||
k8s.io/component-helpers v0.28.4/go.mod h1:8LzMalOQ0K10tkBJWBWq8h0HTI9HDPx4WT3QvTFn9Ro=
|
||||
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
|
||||
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
|
||||
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/kms v0.27.0 h1:adCotKQybOjxwbxW7ogXyv8uQGan/3Y126S2aNW4YFY=
|
||||
k8s.io/kms v0.27.0/go.mod h1:vI2R4Nhw+PZ+DYtVPVYKsIqip2IYjZWK9bESR64WdIw=
|
||||
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg=
|
||||
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY=
|
||||
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY=
|
||||
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 h1:MB1zkK+WMOmfLxEpjr1wEmkpcIhZC7kfTkZ0stg5bog=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1/go.mod h1:/4NLd21PQY0B+H+X0aDZdwUiVXYJQl/2NXA5KVtDiP4=
|
||||
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
|
||||
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
|
||||
k8s.io/kms v0.28.4 h1:PMgY/3CQTWP9eIKmNQiTgjLIZ0ns6O+voagzD2/4mSg=
|
||||
k8s.io/kms v0.28.4/go.mod h1:HL4/lR/bhjAJPbqycKtfhWiKh1Sp21cpHOL8P4oo87w=
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
|
||||
k8s.io/utils v0.0.0-20231121161247-cf03d44ff3cf h1:iTzha1p7Fi83476ypNSz8nV9iR9932jIIs26F7gNLsU=
|
||||
k8s.io/utils v0.0.0-20231121161247-cf03d44ff3cf/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
|
||||
|
||||
257
hack/cherry_pick_pull.sh
Executable file
@@ -0,0 +1,257 @@
#!/usr/bin/env bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Usage Instructions: https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md

# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
# meta.) Assumes you care about pulls from remote "upstream" and
# checks them out to a branch named:
#  automated-cherry-pick-of-<pr>-<target branch>-<timestamp>

set -o errexit
set -o nounset
set -o pipefail

REPO_ROOT="$(git rev-parse --show-toplevel)"
declare -r REPO_ROOT
cd "${REPO_ROOT}"

STARTINGBRANCH=$(git symbolic-ref --short HEAD)
declare -r STARTINGBRANCH
declare -r REBASEMAGIC="${REPO_ROOT}/.git/rebase-apply"
DRY_RUN=${DRY_RUN:-""}
REGENERATE_DOCS=${REGENERATE_DOCS:-""}
UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
FORK_REMOTE=${FORK_REMOTE:-origin}
MAIN_REPO_ORG=${MAIN_REPO_ORG:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $3}')}
MAIN_REPO_NAME=${MAIN_REPO_NAME:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $4}')}

if [[ -z ${GITHUB_USER:-} ]]; then
  echo "Please export GITHUB_USER=<your-user> (or GH organization, if that's where your fork lives)"
  exit 1
fi

if ! command -v gh > /dev/null; then
  echo "Can't find 'gh' tool in PATH, please install from https://github.com/cli/cli"
  exit 1
fi

if [[ "$#" -lt 2 ]]; then
  echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
  echo
  echo "  Checks out <remote branch> and handles the cherry-pick of <pr> (possibly multiple) for you."
  echo "  Examples:"
  echo "    $0 upstream/release-3.14 12345        # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR."
  echo "    $0 upstream/release-3.14 12345 56789  # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR."
  echo
  echo "  Set the DRY_RUN environment var to skip git push and creating PR."
  echo "  This is useful for creating patches to a release branch without making a PR."
  echo "  When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked."
  echo
  echo "  Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits."
  echo "  This is useful when picking commits containing changes to API documentation."
  echo
  echo "  Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)"
  echo "  to override the default remote names to what you have locally."
  echo
  echo "  For merge process info, see https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md"
  exit 2
fi

# Checks if you are logged in. Will error/bail if you are not.
gh auth status

if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then
  echo "!!! Dirty tree. Clean up and try again."
  exit 1
fi

if [[ -e "${REBASEMAGIC}" ]]; then
  echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again."
  exit 1
fi

declare -r BRANCH="$1"
shift 1
declare -r PULLS=( "$@" )

function join { local IFS="$1"; shift; echo "$*"; }
PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789"
declare -r PULLDASH
PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789"
declare -r PULLSUBJ

echo "+++ Updating remotes..."
git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}"

if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then
  echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21."
  echo "    (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)"
  exit 1
fi

NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools.
declare -r NEWBRANCHREQ
NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')"
declare -r NEWBRANCH
NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)"
declare -r NEWBRANCHUNIQ
echo "+++ Creating local branch ${NEWBRANCHUNIQ}"

cleanbranch=""
gitamcleanup=false
function return_to_kansas {
  if [[ "${gitamcleanup}" == "true" ]]; then
    echo
    echo "+++ Aborting in-progress git am."
    git am --abort >/dev/null 2>&1 || true
  fi

  # return to the starting branch and delete the PR text file
  if [[ -z "${DRY_RUN}" ]]; then
    echo
    echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up."
    git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true
    if [[ -n "${cleanbranch}" ]]; then
      git branch -D "${cleanbranch}" >/dev/null 2>&1 || true
    fi
  fi
}
trap return_to_kansas EXIT

SUBJECTS=()
function make-a-pr() {
  local rel
  rel="$(basename "${BRANCH}")"
  echo
  echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}"

  local numandtitle
  numandtitle=$(printf '%s\n' "${SUBJECTS[@]}")
  prtext=$(cat <<EOF
Cherry pick of ${PULLSUBJ} on ${rel}.

${numandtitle}

For details on the cherry pick process, see the [cherry pick requests](https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md) page.

\`\`\`release-note

\`\`\`
EOF
)

  gh pr create --title="Automated cherry pick of ${numandtitle}" --body="${prtext}" --head "${GITHUB_USER}:${NEWBRANCH}" --base "${rel}" --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}"
}

git checkout -b "${NEWBRANCHUNIQ}" "${BRANCH}"
cleanbranch="${NEWBRANCHUNIQ}"

gitamcleanup=true
for pull in "${PULLS[@]}"; do
  echo "+++ Downloading patch to /tmp/${pull}.patch (in case you need to do this again)"

  curl -o "/tmp/${pull}.patch" -sSL "https://github.com/${MAIN_REPO_ORG}/${MAIN_REPO_NAME}/pull/${pull}.patch"
  echo
  echo "+++ About to attempt cherry pick of PR. To reattempt:"
  echo "  $ git am -3 /tmp/${pull}.patch"
  echo
  git am -3 "/tmp/${pull}.patch" || {
    conflicts=false
    while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \
      || [[ -e "${REBASEMAGIC}" ]]; do
      conflicts=true # <-- We should have detected conflicts once
      echo
      echo "+++ Conflicts detected:"
      echo
      (git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?"
      echo
      echo "+++ Please resolve the conflicts in another window (and remember to 'git add / git am --continue')"
      read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
      echo
      if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
        echo "Aborting." >&2
        exit 1
      fi
    done

    if [[ "${conflicts}" != "true" ]]; then
      echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'"
      exit 1
    fi
  }

  # set the subject
  subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //')
  SUBJECTS+=("#${pull}: ${subject}")

  # remove the patch file from /tmp
  rm -f "/tmp/${pull}.patch"
done
gitamcleanup=false

# Re-generate docs (if needed)
if [[ -n "${REGENERATE_DOCS}" ]]; then
  echo
  echo "Regenerating docs..."
  if ! hack/generate-docs.sh; then
    echo
    echo "hack/generate-docs.sh FAILED to complete."
    exit 1
  fi
fi

if [[ -n "${DRY_RUN}" ]]; then
  echo "!!! Skipping git push and PR creation because you set DRY_RUN."
  echo "To return to the branch you were in when you invoked this script:"
  echo
  echo "  git checkout ${STARTINGBRANCH}"
  echo
  echo "To delete this branch:"
  echo
  echo "  git branch -D ${NEWBRANCHUNIQ}"
  exit 0
fi

if git remote -v | grep ^"${FORK_REMOTE}" | grep "${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"; then
  echo "!!! You have ${FORK_REMOTE} configured as your ${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"
  echo "This isn't normal. Leaving you with push instructions:"
  echo
  echo "+++ First manually push the branch this script created:"
  echo
  echo "  git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}"
  echo
  echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)."
  echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values."
  echo
  make-a-pr
  cleanbranch=""
  exit 0
fi

echo
echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):"
echo
echo "  git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}"
echo
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
  echo "Aborting." >&2
  exit 1
fi

git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}"
make-a-pr
@@ -1,5 +1,9 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
featureGates:
  # beta as of 1.27 but we currently run e2e on 1.26
  # this flag should be removed as part of Descheduler 0.29 release
  MatchLabelKeysInPodTopologySpread: true
nodes:
- role: control-plane
- role: worker

@@ -13,6 +13,7 @@ data:
    pluginConfig:
    - name: "DefaultEvictor"
    - name: "RemovePodsViolatingInterPodAntiAffinity"
    - name: "RemoveDuplicates"
    - name: "LowNodeUtilization"
      args:
        thresholds:
@@ -30,4 +31,4 @@ data:
      - "RemoveDuplicates"
    deschedule:
      enabled:
      - "RemovePodsViolatingInterPodAntiAffinity"

@@ -16,7 +16,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.27.0
          image: registry.k8s.io/descheduler/descheduler:v0.28.1
          volumeMounts:
          - mountPath: /policy-dir
            name: policy-volume

@@ -19,7 +19,7 @@ spec:
      serviceAccountName: descheduler-sa
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.27.0
          image: registry.k8s.io/descheduler/descheduler:v0.28.1
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/descheduler"

@@ -14,7 +14,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.27.0
          image: registry.k8s.io/descheduler/descheduler:v0.28.1
          volumeMounts:
          - mountPath: /policy-dir
            name: policy-volume
@@ -19,7 +19,9 @@ package v1alpha1
import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	utilpointer "k8s.io/utils/pointer"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
@@ -170,10 +172,15 @@ var StrategyParamsToPluginArgs = map[string]func(params *StrategyParameters) (*a
		}, nil
	},
	"RemovePodsViolatingTopologySpreadConstraint": func(params *StrategyParameters) (*api.PluginConfig, error) {
		constraints := []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule}
		if params.IncludeSoftConstraints {
			constraints = append(constraints, v1.ScheduleAnyway)
		}
		args := &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
			Namespaces:             v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:          params.LabelSelector,
			IncludeSoftConstraints: params.IncludeSoftConstraints,
			Constraints:            constraints,
			TopologyBalanceNodeFit: utilpointer.Bool(true),
		}
		if err := removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)

@@ -21,6 +21,7 @@ import (
	"testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	utilpointer "k8s.io/utils/pointer"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
@@ -567,13 +568,28 @@ func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t
			result: &api.PluginConfig{
				Name: removepodsviolatingtopologyspreadconstraint.PluginName,
				Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
					IncludeSoftConstraints: true,
					Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
					TopologyBalanceNodeFit: utilpointer.Bool(true),
					Namespaces: &api.Namespaces{
						Exclude: []string{"test1"},
					},
				},
			},
		},
		{
			description: "params without soft constraints",
			params: &StrategyParameters{
				IncludeSoftConstraints: false,
			},
			err: nil,
			result: &api.PluginConfig{
				Name: removepodsviolatingtopologyspreadconstraint.PluginName,
				Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
					Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
					TopologyBalanceNodeFit: utilpointer.Bool(true),
				},
			},
		},
		{
			description: "invalid params namespaces",
			params: &StrategyParameters{

@@ -88,7 +88,7 @@ func convertToInternalPluginConfigArgs(out *api.DeschedulerPolicy) error {
func Convert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
	out.Name = in.Name
	if _, ok := pluginregistry.PluginRegistry[in.Name]; ok {
		out.Args = pluginregistry.PluginRegistry[in.Name].PluginArgInstance
		out.Args = pluginregistry.PluginRegistry[in.Name].PluginArgInstance.DeepCopyObject()
		if in.Args.Raw != nil {
			_, _, err := Codecs.UniversalDecoder().Decode(in.Args.Raw, nil, out.Args)
			if err != nil {
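The one-line change in the hunk above is the substantive fix here: previously every converted PluginConfig was handed the single shared PluginArgInstance held in PluginRegistry, so decoding one profile's raw args could overwrite the args seen by any other profile using the same plugin; calling DeepCopyObject() gives each profile its own object to decode into. A minimal sketch of that aliasing pitfall, using a hypothetical Args type rather than the real registry and runtime.Object machinery:

package main

import "fmt"

// Args stands in for a plugin argument object; hypothetical type, only meant
// to show the aliasing problem the DeepCopyObject() call avoids.
type Args struct{ Threshold int }

// DeepCopy returns an independent copy, analogous to runtime.Object's DeepCopyObject.
func (a *Args) DeepCopy() *Args { c := *a; return &c }

func main() {
	shared := &Args{Threshold: 10} // one prototype instance, as in the plugin registry

	// With a copy per profile (what DeepCopyObject() provides), profiles stay independent.
	a, b := shared.DeepCopy(), shared.DeepCopy()
	a.Threshold = 80
	fmt.Println(b.Threshold) // 10: untouched

	// Without a copy, two "profiles" alias the same object.
	c, d := shared, shared
	c.Threshold = 50
	fmt.Println(d.Threshold) // 50: decoding one profile leaked into the other
}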
@@ -21,7 +21,6 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	componentbaseconfig "k8s.io/component-base/config"
	registry "k8s.io/component-base/logs/api/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -55,14 +54,35 @@ type DeschedulerConfiguration struct {
	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
	IgnorePVCPods bool

	// Tracing specifies the options for tracing.
	Tracing TracingConfiguration

	// LeaderElection starts Deployment using leader election loop
	LeaderElection componentbaseconfig.LeaderElectionConfiguration

	// Logging specifies the options of logging.
	// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
	Logging registry.LoggingConfiguration

	// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
	// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
	ClientConnection componentbaseconfig.ClientConnectionConfiguration
}

type TracingConfiguration struct {
	// CollectorEndpoint is the address of the OpenTelemetry collector.
	// If not specified, the NoopTraceProvider is used.
	CollectorEndpoint string
	// TransportCert is the path to the certificate file for the OpenTelemetry collector.
	// If not specified, the provider starts in insecure mode.
	TransportCert string
	// ServiceName is the name of the service to be used in the OpenTelemetry collector.
	// If not specified, the default value is "descheduler".
	ServiceName string
	// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
	// If not specified, the default namespace is used.
	ServiceNamespace string
	// SampleRate is used to configure the sample rate of the OTEL trace collection. This value will
	// be used as the Base value with sample ratio. A value >= 1.0 will sample everything and < 0 will
	// not sample anything. Everything else is a percentage value.
	SampleRate float64
	// FallbackToNoOpProviderOnError can be set if you want the trace provider to fall back to the
	// no-op provider when the configured endpoint-based provider cannot be set up.
	FallbackToNoOpProviderOnError bool
}

@@ -21,7 +21,6 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	componentbaseconfig "k8s.io/component-base/config"
	registry "k8s.io/component-base/logs/api/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -55,14 +54,35 @@ type DeschedulerConfiguration struct {
	// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
	IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`

	// Tracing is used to set up the required OTEL tracing configuration
	Tracing TracingConfiguration `json:"tracing,omitempty"`

	// LeaderElection starts Deployment using leader election loop
	LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`

	// Logging specifies the options of logging.
	// Refer to [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
	Logging registry.LoggingConfiguration `json:"logging,omitempty"`

	// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
	// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
	ClientConnection componentbaseconfig.ClientConnectionConfiguration `json:"clientConnection,omitempty"`
}

type TracingConfiguration struct {
	// CollectorEndpoint is the address of the OpenTelemetry collector.
	// If not specified, the NoopTraceProvider is used.
	CollectorEndpoint string `json:"collectorEndpoint"`
	// TransportCert is the path to the certificate file for the OpenTelemetry collector.
	// If not specified, the provider starts in insecure mode.
	TransportCert string `json:"transportCert,omitempty"`
	// ServiceName is the name of the service to be used in the OpenTelemetry collector.
	// If not specified, the default value is "descheduler".
	ServiceName string `json:"serviceName,omitempty"`
	// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
	// If not specified, the default namespace is used.
	ServiceNamespace string `json:"serviceNamespace,omitempty"`
	// SampleRate is used to configure the sample rate of the OTEL trace collection. This value will
	// be used as the Base value with sample ratio. A value >= 1.0 will sample everything and < 0 will
	// not sample anything. Everything else is a percentage value.
	SampleRate float64 `json:"sampleRate"`
	// FallbackToNoOpProviderOnError can be set if you want the trace provider to fall back to the
	// no-op provider when the configured endpoint-based provider cannot be set up.
	FallbackToNoOpProviderOnError bool `json:"fallbackToNoOpProviderOnError"`
}
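The two structs above carry the same tracing fields, once as the internal componentconfig type and once with JSON tags for the serialized v1alpha1 form. A short sketch of populating the internal type in Go; the concrete values (endpoint, namespace, sample rate) are illustrative assumptions, not defaults shipped with the descheduler:

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)

func main() {
	cfg := componentconfig.DeschedulerConfiguration{
		Tracing: componentconfig.TracingConfiguration{
			CollectorEndpoint:             "otel-collector.observability.svc:4317", // assumed OTLP collector address
			TransportCert:                 "",            // empty: the provider starts in insecure mode
			ServiceName:                   "descheduler", // matches the documented default
			ServiceNamespace:              "kube-system", // illustrative choice
			SampleRate:                    0.25,          // roughly a quarter of traces; >= 1.0 samples everything, < 0 samples nothing
			FallbackToNoOpProviderOnError: true,          // keep running with a no-op provider if setup fails
		},
	}
	fmt.Printf("%+v\n", cfg.Tracing)
}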
@@ -46,6 +46,16 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*componentconfig.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(a.(*TracingConfiguration), b.(*componentconfig.TracingConfiguration), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*componentconfig.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(a.(*componentconfig.TracingConfiguration), b.(*TracingConfiguration), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -58,8 +68,10 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
|
||||
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
|
||||
out.EvictLocalStoragePods = in.EvictLocalStoragePods
|
||||
out.IgnorePVCPods = in.IgnorePVCPods
|
||||
if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.LeaderElection = in.LeaderElection
|
||||
out.Logging = in.Logging
|
||||
out.ClientConnection = in.ClientConnection
|
||||
return nil
|
||||
}
|
||||
@@ -78,8 +90,10 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
|
||||
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
|
||||
out.EvictLocalStoragePods = in.EvictLocalStoragePods
|
||||
out.IgnorePVCPods = in.IgnorePVCPods
|
||||
if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.LeaderElection = in.LeaderElection
|
||||
out.Logging = in.Logging
|
||||
out.ClientConnection = in.ClientConnection
|
||||
return nil
|
||||
}
|
||||
@@ -88,3 +102,33 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
|
||||
func Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
|
||||
return autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
|
||||
out.CollectorEndpoint = in.CollectorEndpoint
|
||||
out.TransportCert = in.TransportCert
|
||||
out.ServiceName = in.ServiceName
|
||||
out.ServiceNamespace = in.ServiceNamespace
|
||||
out.SampleRate = in.SampleRate
|
||||
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
|
||||
out.CollectorEndpoint = in.CollectorEndpoint
|
||||
out.TransportCert = in.TransportCert
|
||||
out.ServiceName = in.ServiceName
|
||||
out.ServiceNamespace = in.ServiceNamespace
|
||||
out.SampleRate = in.SampleRate
|
||||
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration is an autogenerated conversion function.
|
||||
func Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
|
||||
return autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in, out, s)
|
||||
}
|
||||
|
||||
@@ -29,8 +29,8 @@ import (
|
||||
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.Tracing = in.Tracing
|
||||
out.LeaderElection = in.LeaderElection
|
||||
in.Logging.DeepCopyInto(&out.Logging)
|
||||
out.ClientConnection = in.ClientConnection
|
||||
return
|
||||
}
|
||||
@@ -52,3 +52,19 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
|
||||
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TracingConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
18
pkg/apis/componentconfig/zz_generated.deepcopy.go
generated
18
pkg/apis/componentconfig/zz_generated.deepcopy.go
generated
@@ -29,8 +29,8 @@ import (
|
||||
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.Tracing = in.Tracing
|
||||
out.LeaderElection = in.LeaderElection
|
||||
in.Logging.DeepCopyInto(&out.Logging)
|
||||
out.ClientConnection = in.ClientConnection
|
||||
return
|
||||
}
|
||||
@@ -52,3 +52,19 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
|
||||
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TracingConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
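Since TracingConfiguration holds only scalar fields, the generated DeepCopyInto is a plain value copy and DeepCopy returns a fully independent struct. A small illustration (a sketch only; field names are taken from the conversion functions above):

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
)

func main() {
	orig := componentconfig.TracingConfiguration{CollectorEndpoint: "otel-collector:4317", ServiceName: "descheduler"}
	cp := orig.DeepCopy()
	// Mutating the copy leaves the original untouched.
	cp.CollectorEndpoint = "localhost:4317"
	fmt.Println(orig.CollectorEndpoint, cp.CollectorEndpoint)
}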
@@ -18,42 +18,208 @@ package descheduler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/tools/events"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
listersv1 "k8s.io/client-go/listers/core/v1"
|
||||
schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
|
||||
core "k8s.io/client-go/testing"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/version"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/version"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
|
||||
|
||||
type profileRunner struct {
|
||||
name string
|
||||
descheduleEPs, balanceEPs eprunner
|
||||
}
|
||||
|
||||
type descheduler struct {
|
||||
rs *options.DeschedulerServer
|
||||
podLister listersv1.PodLister
|
||||
nodeLister listersv1.NodeLister
|
||||
namespaceLister listersv1.NamespaceLister
|
||||
priorityClassLister schedulingv1.PriorityClassLister
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
sharedInformerFactory informers.SharedInformerFactory
|
||||
evictionPolicyGroupVersion string
|
||||
deschedulerPolicy *api.DeschedulerPolicy
|
||||
eventRecorder events.EventRecorder
|
||||
}
|
||||
|
||||
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
|
||||
priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
return &descheduler{
|
||||
rs: rs,
|
||||
podLister: podLister,
|
||||
nodeLister: nodeLister,
|
||||
namespaceLister: namespaceLister,
|
||||
priorityClassLister: priorityClassLister,
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
sharedInformerFactory: sharedInformerFactory,
|
||||
evictionPolicyGroupVersion: evictionPolicyGroupVersion,
|
||||
deschedulerPolicy: deschedulerPolicy,
|
||||
eventRecorder: eventRecorder,
|
||||
}, nil
|
||||
}
|
||||
|
||||
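Nothing in newDescheduler talks to a live API server, so it can be exercised against a fake clientset and an in-memory informer factory. A rough test-helper sketch; options.NewDeschedulerServer and the "policy/v1" eviction group version are assumptions based on how the surrounding code is called:

func newTestDescheduler(t *testing.T, policy *api.DeschedulerPolicy, objs ...runtime.Object) *descheduler {
	client := fakeclientset.NewSimpleClientset(objs...)
	sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)

	rs, err := options.NewDeschedulerServer()
	if err != nil {
		t.Fatalf("unable to initialize server: %v", err)
	}
	rs.Client = client

	ctx := context.Background()
	_, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)

	d, err := newDescheduler(ctx, rs, policy, "policy/v1", eventRecorder, sharedInformerFactory)
	if err != nil {
		t.Fatalf("unable to create descheduler instance: %v", err)
	}

	// Start the informers so the pod indexer behind getPodsAssignedToNode is populated.
	sharedInformerFactory.Start(ctx.Done())
	sharedInformerFactory.WaitForCacheSync(ctx.Done())
	return d
}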
func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "runDeschedulerLoop")
|
||||
defer span.End()
|
||||
defer func(loopStartDuration time.Time) {
|
||||
metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
|
||||
}(time.Now())
|
||||
|
||||
// Error out when there are fewer than two ready nodes, since evicting pods could only disrupt or degrade service.
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster has only 0 or 1 ready nodes, so evicting pods would cause service disruption or degradation; aborting")
return fmt.Errorf("the cluster size is 0 or 1")
}
|
||||
|
||||
var client clientset.Interface
|
||||
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
|
||||
if d.rs.DryRun {
|
||||
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
|
||||
// Create a new cache so we start from scratch without any leftovers
|
||||
fakeClient, err := cachedClient(d.rs.Client, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create a new instance of the shared informer factory from the cached client
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
// register the pod informer, otherwise it will never start running
|
||||
d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
|
||||
if err != nil {
|
||||
return fmt.Errorf("build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
fakeCtx, cncl := context.WithCancel(context.TODO())
|
||||
defer cncl()
|
||||
fakeSharedInformerFactory.Start(fakeCtx.Done())
|
||||
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
|
||||
|
||||
client = fakeClient
|
||||
d.sharedInformerFactory = fakeSharedInformerFactory
|
||||
} else {
|
||||
client = d.rs.Client
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Building a pod evictor")
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
client,
|
||||
d.evictionPolicyGroupVersion,
|
||||
d.rs.DryRun,
|
||||
d.deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
|
||||
d.deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
|
||||
nodes,
|
||||
!d.rs.DisableMetrics,
|
||||
d.eventRecorder,
|
||||
)
|
||||
|
||||
d.runProfiles(ctx, client, nodes, podEvictor)
|
||||
|
||||
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
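The dry-run branch above leans on cachedClient (declared further down, body truncated in this view) to snapshot the informer caches into a fake clientset, so consecutive strategies see each other's simulated evictions. The core idea, as a hedged sketch with a made-up helper name:

func snapshotPodsIntoFakeClient(podLister listersv1.PodLister) (clientset.Interface, error) {
	// Read every pod currently known to the informer cache...
	pods, err := podLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	// ...and seed a fresh fake clientset with deep copies of them.
	objs := make([]runtime.Object, 0, len(pods))
	for _, p := range pods {
		objs = append(objs, p.DeepCopy())
	}
	return fakeclientset.NewSimpleClientset(objs...), nil
}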
// runProfiles first runs the deschedule plugins of all profiles and
// only then runs the balance plugins of all profiles (all Balance plugins must come after all Deschedule plugins).
// See https://github.com/kubernetes-sigs/descheduler/issues/979
|
||||
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
|
||||
defer span.End()
|
||||
var profileRunners []profileRunner
|
||||
for _, profile := range d.deschedulerPolicy.Profiles {
|
||||
currProfile, err := frameworkprofile.NewProfile(
|
||||
profile,
|
||||
pluginregistry.PluginRegistry,
|
||||
frameworkprofile.WithClientSet(client),
|
||||
frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
|
||||
frameworkprofile.WithPodEvictor(podEvictor),
|
||||
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
|
||||
continue
|
||||
}
|
||||
profileRunners = append(profileRunners, profileRunner{profile.Name, currProfile.RunDeschedulePlugins, currProfile.RunBalancePlugins})
|
||||
}
|
||||
|
||||
for _, profileR := range profileRunners {
|
||||
// First deschedule
|
||||
status := profileR.descheduleEPs(ctx, nodes)
|
||||
if status != nil && status.Err != nil {
|
||||
span.AddEvent("failed to perform deschedule operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.DescheduleOperation)))
|
||||
klog.ErrorS(status.Err, "running deschedule extension point failed with error", "profile", profileR.name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for _, profileR := range profileRunners {
|
||||
// Balance Later
|
||||
status := profileR.balanceEPs(ctx, nodes)
|
||||
if status != nil && status.Err != nil {
|
||||
span.AddEvent("failed to perform balance operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.BalanceOperation)))
|
||||
klog.ErrorS(status.Err, "running balance extension point failed with error", "profile", profileR.name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
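runProfiles only requires each plugin to expose the extension points invoked above. A bare-bones deschedule plugin, purely illustrative (the type name is made up and registration via pluginregistry is omitted):

type noopDeschedule struct {
	handle frameworktypes.Handle
}

func (p *noopDeschedule) Name() string { return "NoopDeschedule" }

// Deschedule is called once per loop with the ready nodes of the cluster.
func (p *noopDeschedule) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
	for _, node := range nodes {
		klog.V(4).InfoS("noopDeschedule visited node", "node", klog.KObj(node))
	}
	// A nil Err signals success to runProfiles.
	return &frameworktypes.Status{}
}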
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "Run")
|
||||
defer span.End()
|
||||
metrics.Register()
|
||||
|
||||
clientConnection := rs.ClientConnection
|
||||
@@ -76,7 +242,9 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
}
|
||||
|
||||
// Add k8s compatibility warnings to logs
|
||||
versionCompatibilityCheck(rs)
|
||||
if err := validateVersionCompatibility(rs.Client.Discovery(), version.Get()); err != nil {
|
||||
klog.Warning(err.Error())
|
||||
}
|
||||
|
||||
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
|
||||
if err != nil || len(evictionPolicyGroupVersion) == 0 {
|
||||
@@ -88,15 +256,17 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
}
|
||||
|
||||
if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
|
||||
span.AddEvent("Validation Failure", trace.WithAttributes(attribute.String("err", "leaderElection must be used with deschedulingInterval")))
|
||||
return fmt.Errorf("leaderElection must be used with deschedulingInterval")
|
||||
}
|
||||
|
||||
if rs.LeaderElection.LeaderElect && rs.DryRun {
|
||||
klog.V(1).InfoS("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
|
||||
klog.V(1).Info("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
|
||||
}
|
||||
|
||||
if rs.LeaderElection.LeaderElect && !rs.DryRun {
|
||||
if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
|
||||
span.AddEvent("Leader Election Failure", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
return fmt.Errorf("leaderElection: %w", err)
|
||||
}
|
||||
return nil
|
||||
@@ -105,27 +275,34 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
return runFn()
|
||||
}
|
||||
|
||||
func versionCompatibilityCheck(rs *options.DeschedulerServer) {
|
||||
serverVersion, serverErr := rs.Client.Discovery().ServerVersion()
|
||||
if serverErr != nil {
|
||||
klog.V(1).InfoS("Warning: Get Kubernetes server version fail")
|
||||
return
|
||||
}
|
||||
|
||||
deschedulerMinorVersion := strings.Split(version.Get().Minor, ".")[0]
|
||||
deschedulerMinorVersionFloat, err := strconv.ParseFloat(deschedulerMinorVersion, 64)
|
||||
func validateVersionCompatibility(discovery discovery.DiscoveryInterface, versionInfo version.Info) error {
|
||||
serverVersionInfo, err := discovery.ServerVersion()
|
||||
if err != nil {
|
||||
klog.Warning("Warning: Convert Descheduler minor version to float fail")
|
||||
return errors.New("failed to discover Kubernetes server version")
|
||||
}
|
||||
|
||||
kubernetesMinorVersionFloat, err := strconv.ParseFloat(serverVersion.Minor, 64)
|
||||
serverVersion, err := utilversion.ParseSemantic(serverVersionInfo.String())
|
||||
if err != nil {
|
||||
klog.Warning("Warning: Convert Kubernetes server minor version to float fail")
|
||||
return errors.New("failed to parse Kubernetes server version")
|
||||
}
|
||||
|
||||
if math.Abs(deschedulerMinorVersionFloat-kubernetesMinorVersionFloat) > 3 {
|
||||
klog.Warningf("Warning: Descheduler minor version %v is not supported on your version of Kubernetes %v.%v. See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix", deschedulerMinorVersion, serverVersion.Major, serverVersion.Minor)
|
||||
deschedulerVersion, err := utilversion.ParseGeneric(versionInfo.GitVersion)
|
||||
if err != nil {
|
||||
return errors.New("failed to convert Descheduler minor version to float")
|
||||
}
|
||||
|
||||
deschedulerMinor := float64(deschedulerVersion.Minor())
|
||||
serverMinor := float64(serverVersion.Minor())
|
||||
if math.Abs(deschedulerMinor-serverMinor) > 3 {
|
||||
return fmt.Errorf(
|
||||
"descheduler version %v may not be supported on your version of Kubernetes %v."+
|
||||
"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
|
||||
deschedulerVersion.String(),
|
||||
serverVersionInfo.String(),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
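In practice the ±3 minor window means, for example, that descheduler v0.28 is accepted against Kubernetes v1.25 through v1.31. A tiny helper mirroring the arithmetic above (the function name is mine, not part of the change):

func minorSkewTooLarge(deschedulerGitVersion, serverGitVersion string) (bool, error) {
	d, err := utilversion.ParseGeneric(deschedulerGitVersion) // e.g. "v0.28.1"
	if err != nil {
		return false, err
	}
	s, err := utilversion.ParseSemantic(serverGitVersion) // e.g. "v1.25.12-eks-2d98532"
	if err != nil {
		return false, err
	}
	return math.Abs(float64(d.Minor())-float64(s.Minor())) > 3, nil
}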
func cachedClient(
|
||||
@@ -205,23 +382,11 @@ func cachedClient(
|
||||
}
|
||||
|
||||
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
|
||||
defer span.End()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
|
||||
priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
return fmt.Errorf("build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
var nodeSelector string
|
||||
if deschedulerPolicy.NodeSelector != nil {
|
||||
@@ -234,103 +399,38 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
} else {
|
||||
eventClient = rs.Client
|
||||
}
|
||||
|
||||
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
|
||||
defer eventBroadcaster.Shutdown()
|
||||
|
||||
cycleSharedInformerFactory := sharedInformerFactory
|
||||
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
|
||||
if err != nil {
|
||||
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
return err
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
wait.NonSlidingUntil(func() {
|
||||
loopStartDuration := time.Now()
|
||||
defer metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
|
||||
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeLister, nodeSelector)
|
||||
// A new context is created here intentionally to avoid nesting the spans via context.
|
||||
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
|
||||
defer sSpan.End()
|
||||
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, nodeLister, nodeSelector)
|
||||
if err != nil {
|
||||
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
|
||||
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
klog.Error(err)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
if len(nodes) <= 1 {
|
||||
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
|
||||
err = descheduler.runDeschedulerLoop(sCtx, nodes)
|
||||
if err != nil {
|
||||
sSpan.AddEvent("Failed to run descheduler loop", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
klog.Error(err)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
var client clientset.Interface
|
||||
// When the dry mode is enable, collect all the relevant objects (mostly pods) under a fake client.
|
||||
// So when evicting pods while running multiple strategies in a row have the cummulative effect
|
||||
// as is when evicting pods for real.
|
||||
if rs.DryRun {
|
||||
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
|
||||
// Create a new cache so we start from scratch without any leftovers
|
||||
fakeClient, err := cachedClient(rs.Client, podLister, nodeLister, namespaceLister, priorityClassLister)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// create a new instance of the shared informer factor from the cached client
|
||||
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
// register the pod informer, otherwise it will not get running
|
||||
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
|
||||
if err != nil {
|
||||
klog.Errorf("build get pods assigned to node function error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fakeCtx, cncl := context.WithCancel(context.TODO())
|
||||
defer cncl()
|
||||
fakeSharedInformerFactory.Start(fakeCtx.Done())
|
||||
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
|
||||
|
||||
client = fakeClient
|
||||
cycleSharedInformerFactory = fakeSharedInformerFactory
|
||||
} else {
|
||||
client = rs.Client
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Building a pod evictor")
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
client,
|
||||
evictionPolicyGroupVersion,
|
||||
rs.DryRun,
|
||||
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
|
||||
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
|
||||
nodes,
|
||||
!rs.DisableMetrics,
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
for _, profile := range deschedulerPolicy.Profiles {
|
||||
currProfile, err := frameworkprofile.NewProfile(
|
||||
profile,
|
||||
pluginregistry.PluginRegistry,
|
||||
frameworkprofile.WithClientSet(client),
|
||||
frameworkprofile.WithSharedInformerFactory(cycleSharedInformerFactory),
|
||||
frameworkprofile.WithPodEvictor(podEvictor),
|
||||
frameworkprofile.WithGetPodsAssignedToNodeFnc(getPodsAssignedToNode),
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
// First deschedule
|
||||
status := currProfile.RunDeschedulePlugins(ctx, nodes)
|
||||
if status != nil && status.Err != nil {
|
||||
klog.ErrorS(status.Err, "running deschedule extension point failed with error", "profile", profile.Name)
|
||||
continue
|
||||
}
|
||||
// Then balance
|
||||
status = currProfile.RunBalancePlugins(ctx, nodes)
|
||||
if status != nil && status.Err != nil {
|
||||
klog.ErrorS(status.Err, "running balance extension point failed with error", "profile", profile.Name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
|
||||
|
||||
// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
|
||||
if rs.DeschedulingInterval.Seconds() == 0 {
|
||||
cancel()
|
||||
|
||||
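wait.NonSlidingUntil measures the descheduling interval from the start of each iteration rather than from its end, so a slow cycle does not postpone the next one, and cancelling the derived context is what ends the loop in the no-interval case above. A standalone sketch of the same pattern:

func runEvery(parent context.Context, interval time.Duration, body func(context.Context)) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	wait.NonSlidingUntil(func() {
		body(ctx)
		// Mirror the descheduler: with no interval configured, stop after a single pass.
		if interval == 0 {
			cancel()
		}
	}, interval, ctx.Done())
}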
@@ -11,6 +11,8 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
apiversion "k8s.io/apimachinery/pkg/version"
|
||||
fakediscovery "k8s.io/client-go/discovery/fake"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
@@ -20,6 +22,7 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
|
||||
deschedulerversion "sigs.k8s.io/descheduler/pkg/version"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
@@ -245,6 +248,67 @@ func TestRootCancelWithNoInterval(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateVersionCompatibility(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
deschedulerVersion string
|
||||
serverVersion string
|
||||
expectError bool
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
name: "no error when descheduler minor equals to server minor",
|
||||
deschedulerVersion: "v0.26",
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "no error when descheduler minor is 3 behind server minor",
|
||||
deschedulerVersion: "0.23",
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "no error when descheduler minor is 3 ahead of server minor",
|
||||
deschedulerVersion: "v0.26",
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "error when descheduler minor is 4 behind server minor",
|
||||
deschedulerVersion: "v0.22",
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "error when descheduler minor is 4 ahead of server minor",
|
||||
deschedulerVersion: "v0.27",
|
||||
serverVersion: "v1.23.1",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "no error when using managed provider version",
|
||||
deschedulerVersion: "v0.25",
|
||||
serverVersion: "v1.25.12-eks-2d98532",
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
client := fakeclientset.NewSimpleClientset()
|
||||
fakeDiscovery, _ := client.Discovery().(*fakediscovery.FakeDiscovery)
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeDiscovery.FakedServerVersion = &apiversion.Info{GitVersion: tc.serverVersion}
|
||||
deschedulerVersion := deschedulerversion.Info{GitVersion: tc.deschedulerVersion}
|
||||
err := validateVersionCompatibility(fakeDiscovery, deschedulerVersion)
|
||||
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected version compatibility behavior")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.GetSubresource() == "eviction" {
|
||||
|
||||
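podEvictionReactionFuc (cut off above) is a reactor for the fake clientset: evictions arrive as create actions on pods with the eviction subresource. Wiring it into a test would look roughly like the sketch below; the exact verbs are an assumption based on how client-go's fake tracker models evictions:

func newFakeClientRecordingEvictions() (*fakeclientset.Clientset, *[]string) {
	evictedPods := []string{}
	client := fakeclientset.NewSimpleClientset()
	// Intercept eviction requests so the test can record which pods were "evicted".
	client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
	return client, &evictedPods
}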
@@ -20,6 +20,8 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -30,6 +32,7 @@ import (
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
|
||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
)
|
||||
|
||||
// nodePodEvictedCount keeps count of pods evicted on node
|
||||
@@ -113,6 +116,9 @@ type EvictOptions struct {
|
||||
// EvictPod evicts a pod while exercising eviction limits.
|
||||
// Returns true when the pod is evicted on the server side.
|
||||
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
|
||||
defer span.End()
|
||||
// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
|
||||
strategy := ""
|
||||
if ctx.Value("strategyName") != nil {
|
||||
@@ -124,7 +130,8 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||
}
|
||||
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per node reached")))
|
||||
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per node reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -133,13 +140,15 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||
}
|
||||
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per namespace reached")))
|
||||
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per namespace reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
|
||||
return false
|
||||
}
|
||||
|
||||
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
|
||||
if err != nil {
|
||||
// err is used only for logging purposes
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||
|
||||
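The evictPod helper called above is not part of this hunk; with client-go, a server-side eviction through the policy/v1 group boils down to roughly the following (a sketch, not the descheduler's exact implementation):

func evictWithPolicyV1(ctx context.Context, client clientset.Interface, pod *v1.Pod) error {
	eviction := &policy.Eviction{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pod.Name,
			Namespace: pod.Namespace,
		},
	}
	// The API server enforces PodDisruptionBudgets before honouring the request.
	return client.PolicyV1().Evictions(pod.Namespace).Evict(ctx, eviction)
}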
@@ -24,6 +24,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
listersv1 "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
@@ -120,8 +121,7 @@ func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v
|
||||
}
|
||||
// Check whether the pod can fit on the node based on its resource requests
|
||||
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
|
||||
ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
|
||||
if !ok {
|
||||
if ok, reqErrors := fitsRequest(nodeIndexer, pod, node); !ok {
|
||||
errors = append(errors, reqErrors...)
|
||||
}
|
||||
}
|
||||
@@ -146,13 +146,11 @@ func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
for _, err := range errors {
|
||||
klog.V(4).InfoS(err.Error())
|
||||
}
|
||||
}
|
||||
klog.V(4).InfoS("Pod does not fit on node",
|
||||
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -164,13 +162,11 @@ func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod,
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
for _, err := range errors {
|
||||
klog.V(4).InfoS(err.Error())
|
||||
}
|
||||
}
|
||||
klog.V(4).InfoS("Pod does not fit on node",
|
||||
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -181,12 +177,11 @@ func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.P
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
for _, err := range errors {
|
||||
klog.V(4).InfoS(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(4).InfoS("Pod does not fit on node",
|
||||
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -294,3 +289,37 @@ func IsBasicResource(name v1.ResourceName) bool {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// GetNodeWeightGivenPodPreferredAffinity returns the weight
|
||||
// that the pod gives to a node by analyzing the soft node affinity of that pod
|
||||
// (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
|
||||
func GetNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, node *v1.Node) int32 {
|
||||
totalWeight, err := utils.GetNodeWeightGivenPodPreferredAffinity(pod, node)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return totalWeight
|
||||
}
|
||||
|
||||
// GetBestNodeWeightGivenPodPreferredAffinity returns the best weight
|
||||
// (maximum one) that the pod gives to the best node by analyzing the soft node affinity
|
||||
// of that pod (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
|
||||
func GetBestNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, nodes []*v1.Node) int32 {
|
||||
var bestWeight int32 = 0
|
||||
for _, node := range nodes {
|
||||
weight := GetNodeWeightGivenPodPreferredAffinity(pod, node)
|
||||
if weight > bestWeight {
|
||||
bestWeight = weight
|
||||
}
|
||||
}
|
||||
return bestWeight
|
||||
}
|
||||
|
||||
// PodMatchNodeSelector checks if a pod node selector matches the node label.
|
||||
func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) bool {
|
||||
matches, err := utils.PodMatchNodeSelector(pod, node)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return matches
|
||||
}
|
||||
|
||||
@@ -811,6 +811,78 @@ func TestNodeFit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBestPodNodeAffinityWeight(t *testing.T) {
|
||||
defaultPod := test.BuildTestPod("p1", 0, 0, "node1", func(p *v1.Pod) {
|
||||
p.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
Weight: 10,
|
||||
Preference: v1.NodeSelectorTerm{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: "In",
|
||||
Values: []string{"value1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
tests := []struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedWeight int32
|
||||
}{
|
||||
{
|
||||
description: "No node matches the preferred affinity",
|
||||
pod: defaultPod,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node2", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"key2": "value2",
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node3", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"key3": "value3",
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedWeight: 0,
|
||||
},
|
||||
{
|
||||
description: "A single node matches the preferred affinity",
|
||||
pod: defaultPod,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"key1": "value1",
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node2", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"key2": "value2",
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedWeight: 10,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
bestWeight := GetBestNodeWeightGivenPodPreferredAffinity(tc.pod, tc.nodes)
|
||||
if bestWeight != tc.expectedWeight {
|
||||
t.Errorf("Test %#v failed", tc.description)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// createResourceList builds a small resource list of core resources
|
||||
func createResourceList(cpu, memory, ephemeralStorage int64) v1.ResourceList {
|
||||
resourceList := make(map[v1.ResourceName]resource.Quantity)
|
||||
|
||||
@@ -53,8 +53,8 @@ func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
|
||||
|
||||
type Options struct {
|
||||
filter FilterFunc
|
||||
includedNamespaces sets.String
|
||||
excludedNamespaces sets.String
|
||||
includedNamespaces sets.Set[string]
|
||||
excludedNamespaces sets.Set[string]
|
||||
labelSelector *metav1.LabelSelector
|
||||
}
|
||||
|
||||
@@ -71,13 +71,13 @@ func (o *Options) WithFilter(filter FilterFunc) *Options {
|
||||
}
|
||||
|
||||
// WithNamespaces sets included namespaces
|
||||
func (o *Options) WithNamespaces(namespaces sets.String) *Options {
|
||||
func (o *Options) WithNamespaces(namespaces sets.Set[string]) *Options {
|
||||
o.includedNamespaces = namespaces
|
||||
return o
|
||||
}
|
||||
|
||||
// WithoutNamespaces sets excluded namespaces
|
||||
func (o *Options) WithoutNamespaces(namespaces sets.String) *Options {
|
||||
func (o *Options) WithoutNamespaces(namespaces sets.Set[string]) *Options {
|
||||
o.excludedNamespaces = namespaces
|
||||
return o
|
||||
}
|
||||
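The move from the deprecated sets.String to the generic sets.Set[string] seen here (and in the plugin files below) is mechanical: sets.NewString becomes sets.New, and List() becomes UnsortedList() or sets.List(). A compact before/after sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Deprecated form: ns := sets.NewString("kube-system"); sorted := ns.List()
	ns := sets.New("kube-system", "descheduler") // inferred as sets.Set[string]
	fmt.Println(ns.Has("kube-system"))           // true
	fmt.Println(ns.UnsortedList())               // order not guaranteed
	fmt.Println(sets.List(ns))                   // sorted replacement for List()
}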
@@ -157,6 +157,20 @@ func BuildGetPodsAssignedToNodeFunc(podInformer cache.SharedIndexInformer) (GetP
|
||||
return getPodsAssignedToNode, nil
|
||||
}
|
||||
|
||||
// ListPodsOnNodes returns all pods on given nodes.
|
||||
func ListPodsOnNodes(nodes []*v1.Node, getPodsAssignedToNode GetPodsAssignedToNodeFunc, filter FilterFunc) ([]*v1.Pod, error) {
|
||||
pods := make([]*v1.Pod, 0)
|
||||
for _, node := range nodes {
|
||||
podsOnNode, err := ListPodsOnANode(node.Name, getPodsAssignedToNode, filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pods = append(pods, podsOnNode...)
|
||||
}
|
||||
return pods, nil
|
||||
}
|
||||
|
||||
// ListPodsOnANode lists all pods on a node.
|
||||
// It also accepts a "filter" function which can be used to further limit the pods that are returned.
|
||||
// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
|
||||
@@ -187,6 +201,15 @@ func ListAllPodsOnANode(
|
||||
return pods, nil
|
||||
}
|
||||
|
||||
func GroupByNamespace(pods []*v1.Pod) map[string][]*v1.Pod {
|
||||
m := make(map[string][]*v1.Pod)
|
||||
for i := 0; i < len(pods); i++ {
|
||||
pod := pods[i]
|
||||
m[pod.Namespace] = append(m[pod.Namespace], pod)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// OwnerRef returns the ownerRefList for the pod.
|
||||
func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
|
||||
return pod.ObjectMeta.GetOwnerReferences()
|
||||
|
||||
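ListPodsOnNodes and GroupByNamespace compose with the node-indexed getter produced by BuildGetPodsAssignedToNodeFunc. A hedged usage sketch (the filter is only an example):

func logPodsPerNamespace(nodes []*v1.Node, getPods podutil.GetPodsAssignedToNodeFunc) error {
	onlyRunning := func(pod *v1.Pod) bool { return pod.Status.Phase == v1.PodRunning }

	pods, err := podutil.ListPodsOnNodes(nodes, getPods, onlyRunning)
	if err != nil {
		return err
	}
	for ns, nsPods := range podutil.GroupByNamespace(pods) {
		klog.V(4).InfoS("Pods per namespace", "namespace", ns, "count", len(nsPods))
	}
	return nil
}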
@@ -21,6 +21,8 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
@@ -133,29 +135,22 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
|
||||
}
|
||||
|
||||
func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginregistry.Registry) error {
|
||||
var errorsInProfiles error
|
||||
var errorsInProfiles []error
|
||||
for _, profile := range in.Profiles {
|
||||
for _, pluginConfig := range profile.PluginConfigs {
|
||||
if _, ok := registry[pluginConfig.Name]; ok {
|
||||
if _, ok := registry[pluginConfig.Name]; !ok {
|
||||
errorsInProfiles = fmt.Errorf("%w: %s", errorsInProfiles, fmt.Sprintf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
|
||||
continue
|
||||
}
|
||||
if _, ok := registry[pluginConfig.Name]; !ok {
|
||||
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
pluginUtilities := registry[pluginConfig.Name]
|
||||
if pluginUtilities.PluginArgValidator == nil {
|
||||
continue
|
||||
}
|
||||
err := pluginUtilities.PluginArgValidator(pluginConfig.Args)
|
||||
if err != nil {
|
||||
if errorsInProfiles == nil {
|
||||
errorsInProfiles = fmt.Errorf("in profile %s: %s", profile.Name, err.Error())
|
||||
} else {
|
||||
errorsInProfiles = fmt.Errorf("%w: %s", errorsInProfiles, fmt.Sprintf("in profile %s: %s", profile.Name, err.Error()))
|
||||
}
|
||||
}
|
||||
pluginUtilities := registry[pluginConfig.Name]
|
||||
if pluginUtilities.PluginArgValidator == nil {
|
||||
continue
|
||||
}
|
||||
if err := pluginUtilities.PluginArgValidator(pluginConfig.Args); err != nil {
|
||||
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
|
||||
}
|
||||
}
|
||||
}
|
||||
return errorsInProfiles
|
||||
return utilerrors.NewAggregate(errorsInProfiles)
|
||||
}
|
||||
|
||||
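Collecting the profile errors into a slice and returning utilerrors.NewAggregate also changes the rendered message: an empty slice yields a nil error, while several errors print as a bracketed, comma-separated list, which is what the updated test expectation further down relies on. A quick illustration:

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	fmt.Println(utilerrors.NewAggregate(nil)) // <nil>: no profile had problems
	agg := utilerrors.NewAggregate([]error{
		errors.New("in profile A: plugin X in pluginConfig not registered"),
		errors.New("in profile B: only one of Include/Exclude namespaces can be set"),
	})
	fmt.Println(agg) // prints both messages inside [...], separated by a comma
}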
@@ -21,6 +21,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -399,7 +400,10 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
@@ -714,7 +718,8 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
|
||||
{
|
||||
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
IncludeSoftConstraints: true,
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -857,6 +862,79 @@ strategies:
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "v1alpha1 to internal with priorityThreshold",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 5
|
||||
namespaces:
|
||||
include:
|
||||
- "testleaderelection-a"
|
||||
thresholdPriority: null
|
||||
thresholdPriorityClassName: prioritym
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", podlifetime.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: "DefaultEvictor",
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: utilpointer.Int32(0),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: podlifetime.PluginName,
|
||||
Args: &podlifetime.PodLifeTimeArgs{
|
||||
Namespaces: &api.Namespaces{
|
||||
Include: []string{"testleaderelection-a"},
|
||||
},
|
||||
MaxPodLifeTimeSeconds: utilpointer.Uint(5),
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{podlifetime.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "v1alpha1 to internal with priorityThreshold value and name should return error",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 5
|
||||
namespaces:
|
||||
include:
|
||||
- "testleaderelection-a"
|
||||
thresholdPriority: 222
|
||||
thresholdPriorityClassName: prioritym
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("failed decoding descheduler's policy config \"filename\": priority threshold misconfigured for plugin PodLifeTime"),
|
||||
},
|
||||
{
|
||||
description: "v1alpha2 to internal",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
@@ -925,7 +1003,7 @@ profiles:
|
||||
if err != nil {
|
||||
if tc.err == nil {
|
||||
t.Errorf("unexpected error: %s.", err.Error())
|
||||
} else {
|
||||
} else if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
|
||||
}
|
||||
}
|
||||
@@ -985,7 +1063,7 @@ func TestValidateDeschedulerConfiguration(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set: in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set"),
|
||||
result: fmt.Errorf("[in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set, in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set]"),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -156,11 +156,11 @@ func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
if defaultEvictorArgs.NodeFit {
|
||||
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), defaultEvictorArgs.NodeSelector)
|
||||
if err != nil {
|
||||
klog.ErrorS(fmt.Errorf("Pod fails the following checks"), "pod", klog.KObj(pod))
|
||||
klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
|
||||
klog.ErrorS(fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable"), "pod", klog.KObj(pod))
|
||||
klog.InfoS("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
return true
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
func ValidateDefaultEvictorArgs(obj runtime.Object) error {
|
||||
args := obj.(*DefaultEvictorArgs)
|
||||
|
||||
if args.PriorityThreshold != nil && len(args.PriorityThreshold.Name) > 0 {
|
||||
if args.PriorityThreshold != nil && args.PriorityThreshold.Value != nil && len(args.PriorityThreshold.Name) > 0 {
|
||||
return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set, got %v", args)
|
||||
}
|
||||
|
||||
|
||||
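With this change only the combination of both a priority value and a priority class name is rejected; either field on its own remains valid. A hedged example of the three cases (the helper name is made up):

func priorityThresholdExamples() {
	okValue := &DefaultEvictorArgs{PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(1000)}}
	okName := &DefaultEvictorArgs{PriorityThreshold: &api.PriorityThreshold{Name: "system-cluster-critical"}}
	bad := &DefaultEvictorArgs{PriorityThreshold: &api.PriorityThreshold{
		Value: utilpointer.Int32(1000),
		Name:  "system-cluster-critical",
	}}

	_ = ValidateDefaultEvictorArgs(okValue) // nil
	_ = ValidateDefaultEvictorArgs(okName)  // nil
	_ = ValidateDefaultEvictorArgs(bad)     // "priority threshold misconfigured" error
}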
@@ -18,6 +18,7 @@ package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -163,6 +164,10 @@ func resourceThreshold(nodeCapacity v1.ResourceList, resourceName v1.ResourceNam
|
||||
return resource.NewQuantity(resourceCapacityFraction(resourceCapacityQuantity.Value()), defaultFormat)
|
||||
}
|
||||
|
||||
func roundTo2Decimals(percentage float64) float64 {
|
||||
return math.Round(percentage*100) / 100
|
||||
}
|
||||
|
||||
func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
|
||||
nodeCapacity := nodeUsage.node.Status.Capacity
|
||||
if len(nodeUsage.node.Status.Allocatable) > 0 {
|
||||
@@ -174,6 +179,7 @@ func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
|
||||
cap := nodeCapacity[resourceName]
|
||||
if !cap.IsZero() {
|
||||
resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.MilliValue()) / float64(cap.MilliValue())
|
||||
resourceUsagePercentage[resourceName] = roundTo2Decimals(resourceUsagePercentage[resourceName])
|
||||
}
|
||||
}
|
||||
|
||||
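roundTo2Decimals only trims the reported percentage for readability; for example a node using 1000m of a 3000m CPU capacity is reported as 33.33 instead of 33.333333…. A tiny check of the same arithmetic:

package main

import (
	"fmt"
	"math"
)

func roundTo2Decimals(percentage float64) float64 {
	return math.Round(percentage*100) / 100
}

func main() {
	usageMilli, capacityMilli := 1000.0, 3000.0
	pct := 100 * usageMilli / capacityMilli
	fmt.Println(roundTo2Decimals(pct)) // 33.33
}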
@@ -282,9 +288,9 @@ func evictPods(
|
||||
podEvictor frameworktypes.Evictor,
|
||||
continueEviction continueEvictionCond,
|
||||
) {
|
||||
var excludedNamespaces sets.String
|
||||
var excludedNamespaces sets.Set[string]
|
||||
if evictableNamespaces != nil {
|
||||
excludedNamespaces = sets.NewString(evictableNamespaces.Exclude...)
|
||||
excludedNamespaces = sets.New(evictableNamespaces.Exclude...)
|
||||
}
|
||||
|
||||
if continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
|
||||
@@ -49,10 +49,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
return nil, fmt.Errorf("want args to be of type PodLifeTimeArgs, got %T", args)
|
||||
}
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.String
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if podLifeTimeArgs.Namespaces != nil {
|
||||
includedNamespaces = sets.NewString(podLifeTimeArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.NewString(podLifeTimeArgs.Namespaces.Exclude...)
|
||||
includedNamespaces = sets.New(podLifeTimeArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.New(podLifeTimeArgs.Namespaces.Exclude...)
|
||||
}
|
||||
|
||||
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
|
||||
@@ -72,7 +72,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
})
|
||||
|
||||
if len(podLifeTimeArgs.States) > 0 {
|
||||
states := sets.NewString(podLifeTimeArgs.States...)
|
||||
states := sets.New(podLifeTimeArgs.States...)
|
||||
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
|
||||
if states.Has(string(pod.Status.Phase)) {
|
||||
return true
|
||||
@@ -106,7 +106,7 @@ func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framewo
|
||||
nodeMap := make(map[string]*v1.Node, len(nodes))
|
||||
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
|
||||
@@ -43,7 +43,7 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
|
||||
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
|
||||
}
|
||||
}
|
||||
podLifeTimeAllowedStates := sets.NewString(
|
||||
podLifeTimeAllowedStates := sets.New(
|
||||
string(v1.PodRunning),
|
||||
string(v1.PodPending),
|
||||
|
||||
@@ -53,7 +53,7 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
|
||||
)
|
||||
|
||||
if !podLifeTimeAllowedStates.HasAll(args.States...) {
|
||||
return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.List())
|
||||
return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.UnsortedList())
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -57,6 +57,10 @@ type podOwner struct {
|
||||
imagesHash string
|
||||
}
|
||||
|
||||
func (po podOwner) String() string {
|
||||
return fmt.Sprintf("%s/%s/%s/%s", po.namespace, po.kind, po.name, po.imagesHash)
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
removeDuplicatesArgs, ok := args.(*RemoveDuplicatesArgs)
|
||||
@@ -64,10 +68,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
return nil, fmt.Errorf("want args to be of type RemoveDuplicatesArgs, got %T", args)
|
||||
}
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.String
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if removeDuplicatesArgs.Namespaces != nil {
|
||||
includedNamespaces = sets.NewString(removeDuplicatesArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.NewString(removeDuplicatesArgs.Namespaces.Exclude...)
|
||||
includedNamespaces = sets.New(removeDuplicatesArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.New(removeDuplicatesArgs.Namespaces.Exclude...)
|
||||
}
|
||||
|
||||
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
|
||||
@@ -100,7 +104,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
nodeMap := make(map[string]*v1.Node)
|
||||
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, r.handle.GetPodsAssignedToNodeFunc(), r.podFilter)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
|
||||
@@ -279,7 +283,7 @@ func hasExcludedOwnerRefKind(ownerRefs []metav1.OwnerReference, excludeOwnerKind
|
||||
return false
|
||||
}
|
||||
|
||||
exclude := sets.NewString(excludeOwnerKinds...)
|
||||
exclude := sets.New(excludeOwnerKinds...)
|
||||
for _, owner := range ownerRefs {
|
||||
if exclude.Has(owner.Kind) {
|
||||
return true
|
||||
|
||||
@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
return nil, fmt.Errorf("want args to be of type RemoveFailedPodsArgs, got %T", args)
|
||||
}
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.String
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if failedPodsArgs.Namespaces != nil {
|
||||
includedNamespaces = sets.NewString(failedPodsArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.NewString(failedPodsArgs.Namespaces.Exclude...)
|
||||
includedNamespaces = sets.New(failedPodsArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.New(failedPodsArgs.Namespaces.Exclude...)
|
||||
}
|
||||
|
||||
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
|
||||
@@ -93,7 +93,7 @@ func (d *RemoveFailedPods) Name() string {
|
||||
// Deschedule extension point implementation for the plugin
|
||||
func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
@@ -126,7 +126,7 @@ func validateCanEvict(pod *v1.Pod, failedPodArgs *RemoveFailedPodsArgs) error {
|
||||
if len(failedPodArgs.ExcludeOwnerKinds) > 0 {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
for _, owner := range ownerRefList {
|
||||
if sets.NewString(failedPodArgs.ExcludeOwnerKinds...).Has(owner.Kind) {
|
||||
if sets.New(failedPodArgs.ExcludeOwnerKinds...).Has(owner.Kind) {
|
||||
errs = append(errs, fmt.Errorf("pod's owner kind of %s is excluded", owner.Kind))
|
||||
}
|
||||
}
|
||||
@@ -143,7 +143,7 @@ func validateCanEvict(pod *v1.Pod, failedPodArgs *RemoveFailedPodsArgs) error {
|
||||
reasons = append(reasons, getFailedContainerStatusReasons(pod.Status.InitContainerStatuses)...)
|
||||
}
|
||||
|
||||
if !sets.NewString(failedPodArgs.Reasons...).HasAny(reasons...) {
|
||||
if !sets.New(failedPodArgs.Reasons...).HasAny(reasons...) {
|
||||
errs = append(errs, fmt.Errorf("pod does not match any of the reasons"))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,4 +37,7 @@ func SetDefaults_RemovePodsHavingTooManyRestartsArgs(obj runtime.Object) {
|
||||
if !args.IncludingInitContainers {
|
||||
args.IncludingInitContainers = false
|
||||
}
|
||||
if args.States == nil {
|
||||
args.States = nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
@@ -37,6 +38,7 @@ func TestSetDefaults_RemovePodsHavingTooManyRestartsArgs(t *testing.T) {
|
||||
LabelSelector: nil,
|
||||
PodRestartThreshold: 0,
|
||||
IncludingInitContainers: false,
|
||||
States: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -46,12 +48,14 @@ func TestSetDefaults_RemovePodsHavingTooManyRestartsArgs(t *testing.T) {
|
||||
LabelSelector: &metav1.LabelSelector{},
|
||||
PodRestartThreshold: 10,
|
||||
IncludingInitContainers: true,
|
||||
States: []string{string(v1.PodRunning)},
|
||||
},
|
||||
want: &RemovePodsHavingTooManyRestartsArgs{
|
||||
Namespaces: &api.Namespaces{},
|
||||
LabelSelector: &metav1.LabelSelector{},
|
||||
PodRestartThreshold: 10,
|
||||
IncludingInitContainers: true,
|
||||
States: []string{string(v1.PodRunning)},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
|
||||
}
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.String
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if tooManyRestartsArgs.Namespaces != nil {
|
||||
includedNamespaces = sets.NewString(tooManyRestartsArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.NewString(tooManyRestartsArgs.Namespaces.Exclude...)
|
||||
includedNamespaces = sets.New(tooManyRestartsArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.New(tooManyRestartsArgs.Namespaces.Exclude...)
|
||||
}
|
||||
|
||||
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
|
||||
@@ -75,6 +75,23 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return true
})

if len(tooManyRestartsArgs.States) > 0 {
states := sets.New(tooManyRestartsArgs.States...)
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
if states.Has(string(pod.Status.Phase)) {
return true
}

for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
return true
}
}

return false
})
}

return &RemovePodsHavingTooManyRestarts{
handle: handle,
args: tooManyRestartsArgs,
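Note: the new States handling above wraps the pod filter so that a pod qualifies when either its phase or any container's waiting reason matches a configured state. A small standalone sketch of that predicate, assuming only the core v1 types and the generic sets package (not part of the diff):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// matchesStates mirrors the wrapped filter above: accept the pod if its phase
// or any container waiting reason is in the configured set of states.
func matchesStates(pod *v1.Pod, states sets.Set[string]) bool {
	if states.Has(string(pod.Status.Phase)) {
		return true
	}
	for _, cs := range pod.Status.ContainerStatuses {
		if cs.State.Waiting != nil && states.Has(cs.State.Waiting.Reason) {
			return true
		}
	}
	return false
}

func main() {
	states := sets.New("CrashLoopBackOff")
	pod := &v1.Pod{}
	pod.Status.Phase = v1.PodRunning
	pod.Status.ContainerStatuses = []v1.ContainerStatus{
		{State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"}}},
	}
	fmt.Println(matchesStates(pod, states)) // true
}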
@@ -90,7 +107,7 @@ func (d *RemovePodsHavingTooManyRestarts) Name() string {
// Deschedule extension point implementation for the plugin
func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -104,8 +104,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
node4 := test.BuildTestNode("node4", 200, 3000, 10, nil)
|
||||
node5 := test.BuildTestNode("node5", 2000, 3000, 10, nil)
|
||||
|
||||
pods := append(append(initPods(node1), test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, nil)), test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, nil))
|
||||
|
||||
createRemovePodsHavingTooManyRestartsAgrs := func(
|
||||
podRestartThresholds int32,
|
||||
includingInitContainers bool,
|
||||
@@ -126,6 +124,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxNoOfPodsToEvictPerNamespace *uint
|
||||
nodeFit bool
|
||||
applyFunc func([]*v1.Pod)
|
||||
}{
|
||||
{
|
||||
description: "All pods have total restarts under threshold, no pod evictions",
|
||||
@@ -191,7 +190,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
maxNoOfPodsToEvictPerNamespace: &uint3,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tained, 0 pod evictions",
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
|
||||
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
expectedEvictedPodCount: 0,
|
||||
@@ -222,10 +221,68 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
|
||||
nodes: []*v1.Node{node1, node5},
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
nodeFit: true,
|
||||
applyFunc: func(pods []*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
if len(pod.Status.ContainerStatuses) > 0 {
|
||||
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "pods without CrashLoopBackOff with states=CrashLoopBackOff, 0 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
|
||||
nodes: []*v1.Node{node1, node5},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "pods running with state=Running, 3 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
applyFunc: func(pods []*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
pod.Status.Phase = v1.PodRunning
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "pods pending with state=Running, 0 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
applyFunc: func(pods []*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
pod.Status.Phase = v1.PodPending
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
pods := append(
|
||||
initPods(node1),
|
||||
test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, nil),
|
||||
test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, nil),
|
||||
)
|
||||
if tc.applyFunc != nil {
|
||||
tc.applyFunc(pods)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
|
||||
@@ -29,4 +29,5 @@ type RemovePodsHavingTooManyRestartsArgs struct {
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
PodRestartThreshold int32 `json:"podRestartThreshold"`
IncludingInitContainers bool `json:"includingInitContainers"`
States []string `json:"states"`
}
@@ -16,8 +16,10 @@ package removepodshavingtoomanyrestarts
import (
"fmt"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
)

// ValidateRemovePodsHavingTooManyRestartsArgs validates RemovePodsHavingTooManyRestarts arguments
@@ -38,5 +40,17 @@ func ValidateRemovePodsHavingTooManyRestartsArgs(obj runtime.Object) error {
return fmt.Errorf("invalid PodsHavingTooManyRestarts threshold")
}

allowedStates := sets.New(
// Pod phases:
string(v1.PodRunning),

// Container state reasons:
"CrashLoopBackOff",
)

if !allowedStates.HasAll(args.States...) {
return fmt.Errorf("states must be one of %v", allowedStates.UnsortedList())
}

return nil
}
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package removepodshavingtoomanyrestarts
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
args *RemovePodsHavingTooManyRestartsArgs
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
description: "valid arg, no errors",
|
||||
args: &RemovePodsHavingTooManyRestartsArgs{
|
||||
PodRestartThreshold: 1,
|
||||
States: []string{string(v1.PodRunning)},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "invalid PodRestartThreshold arg, expects errors",
|
||||
args: &RemovePodsHavingTooManyRestartsArgs{
|
||||
PodRestartThreshold: 0,
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
description: "invalid States arg, expects errors",
|
||||
args: &RemovePodsHavingTooManyRestartsArgs{
|
||||
PodRestartThreshold: 1,
|
||||
States: []string{string(v1.PodFailed)},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
description: "allows CrashLoopBackOff state",
|
||||
args: &RemovePodsHavingTooManyRestartsArgs{
|
||||
PodRestartThreshold: 1,
|
||||
States: []string{"CrashLoopBackOff"},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidateRemovePodsHavingTooManyRestartsArgs(tc.args)
|
||||
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected arg validation behavior")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -41,6 +41,11 @@ func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopyInto(out *RemovePodsHavin
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.States != nil {
|
||||
in, out := &in.States, &out.States
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingInterPodAntiAffinityArgs, got %T", args)
}

var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if interPodAntiAffinityArgs.Namespaces != nil {
includedNamespaces = sets.NewString(interPodAntiAffinityArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(interPodAntiAffinityArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(interPodAntiAffinityArgs.Namespaces.Include...)
excludedNamespaces = sets.New(interPodAntiAffinityArgs.Namespaces.Exclude...)
}

podFilter, err := podutil.NewOptions().
@@ -78,24 +78,31 @@ func (d *RemovePodsViolatingInterPodAntiAffinity) Name() string {
|
||||
}
|
||||
|
||||
func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
pods, err := podutil.ListPodsOnNodes(nodes, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing all pods: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
podsInANamespace := podutil.GroupByNamespace(pods)
|
||||
podsOnANode := groupByNodeName(pods)
|
||||
nodeMap := createNodeMap(nodes)
|
||||
|
||||
loop:
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing pods: %v", err),
|
||||
}
|
||||
}
|
||||
// sort the evictable Pods based on priority, if there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods := podsOnANode[node.Name]
|
||||
// sort the evict-able Pods based on priority, if there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||
podutil.SortPodsBasedOnPriorityLowToHigh(pods)
|
||||
totalPods := len(pods)
|
||||
for i := 0; i < totalPods; i++ {
|
||||
if checkPodsWithAntiAffinityExist(pods[i], pods) && d.handle.Evictor().Filter(pods[i]) && d.handle.Evictor().PreEvictionFilter(pods[i]) {
|
||||
if checkPodsWithAntiAffinityExist(pods[i], podsInANamespace, nodeMap) && d.handle.Evictor().Filter(pods[i]) && d.handle.Evictor().PreEvictionFilter(pods[i]) {
|
||||
if d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{}) {
|
||||
// Since the current pod is evicted all other pods which have anti-affinity with this
|
||||
// pod need not be evicted.
|
||||
// Update pods.
|
||||
// Update allPods.
|
||||
podsInANamespace = removePodFromNamespaceMap(pods[i], podsInANamespace)
|
||||
pods = append(pods[:i], pods[i+1:]...)
|
||||
i--
|
||||
totalPods--
|
||||
@@ -109,8 +116,40 @@ loop:
|
||||
return nil
|
||||
}
|
||||
|
||||
func removePodFromNamespaceMap(podToRemove *v1.Pod, podMap map[string][]*v1.Pod) map[string][]*v1.Pod {
|
||||
podList, ok := podMap[podToRemove.Namespace]
|
||||
if !ok {
|
||||
return podMap
|
||||
}
|
||||
for i := 0; i < len(podList); i++ {
|
||||
podToCheck := podList[i]
|
||||
if podToRemove.Name == podToCheck.Name {
|
||||
podMap[podToRemove.Namespace] = append(podList[:i], podList[i+1:]...)
|
||||
return podMap
|
||||
}
|
||||
}
|
||||
return podMap
|
||||
}
|
||||
|
||||
func groupByNodeName(pods []*v1.Pod) map[string][]*v1.Pod {
|
||||
m := make(map[string][]*v1.Pod)
|
||||
for i := 0; i < len(pods); i++ {
|
||||
pod := pods[i]
|
||||
m[pod.Spec.NodeName] = append(m[pod.Spec.NodeName], pod)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func createNodeMap(nodes []*v1.Node) map[string]*v1.Node {
|
||||
m := make(map[string]*v1.Node, len(nodes))
|
||||
for _, node := range nodes {
|
||||
m[node.GetName()] = node
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// checkPodsWithAntiAffinityExist checks if there are other pods on the node that the current pod cannot tolerate.
|
||||
func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
|
||||
func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods map[string][]*v1.Pod, nodeMap map[string]*v1.Node) bool {
|
||||
affinity := pod.Spec.Affinity
|
||||
if affinity != nil && affinity.PodAntiAffinity != nil {
|
||||
for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) {
|
||||
@@ -120,9 +159,22 @@ func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
|
||||
klog.ErrorS(err, "Unable to convert LabelSelector into Selector")
|
||||
return false
|
||||
}
|
||||
for _, existingPod := range pods {
|
||||
if existingPod.Name != pod.Name && utils.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
|
||||
return true
|
||||
for namespace := range namespaces {
|
||||
for _, existingPod := range pods[namespace] {
|
||||
if existingPod.Name != pod.Name && utils.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
|
||||
node, ok := nodeMap[pod.Spec.NodeName]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
nodeHavingExistingPod, ok := nodeMap[existingPod.Spec.NodeName]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if hasSameLabelValue(node, nodeHavingExistingPod, term.TopologyKey) {
|
||||
klog.V(1).InfoS("Found Pods violating PodAntiAffinity", "pod to evicted", klog.KObj(pod))
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -130,6 +182,29 @@ func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func hasSameLabelValue(node1, node2 *v1.Node, key string) bool {
|
||||
if node1.Name == node2.Name {
|
||||
return true
|
||||
}
|
||||
node1Labels := node1.Labels
|
||||
if node1Labels == nil {
|
||||
return false
|
||||
}
|
||||
node2Labels := node2.Labels
|
||||
if node2Labels == nil {
|
||||
return false
|
||||
}
|
||||
value1, ok := node1Labels[key]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
value2, ok := node2Labels[key]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return value1 == value2
|
||||
}
|
||||
|
||||
// getPodAntiAffinityTerms gets the antiaffinity terms for the given pod.
|
||||
func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
|
||||
if podAntiAffinity != nil {
|
||||
|
||||
@@ -38,19 +38,27 @@ import (
|
||||
)
|
||||
|
||||
func TestPodAntiAffinity(t *testing.T) {
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"region": "main-region",
|
||||
}
|
||||
})
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"datacenter": "east",
|
||||
}
|
||||
})
|
||||
|
||||
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec = v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
node4 := test.BuildTestNode("n4", 2, 2, 1, nil)
|
||||
node5 := test.BuildTestNode("n5", 200, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"region": "main-region",
|
||||
}
|
||||
})
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
@@ -62,6 +70,7 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
|
||||
p11 := test.BuildTestPod("p11", 100, 0, node5.Name, nil)
|
||||
p9.DeletionTimestamp = &metav1.Time{}
|
||||
p10.DeletionTimestamp = &metav1.Time{}
|
||||
|
||||
@@ -73,6 +82,7 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
p5.Labels = map[string]string{"foo": "bar"}
|
||||
p6.Labels = map[string]string{"foo": "bar"}
|
||||
p7.Labels = map[string]string{"foo1": "bar1"}
|
||||
p11.Labels = map[string]string{"foo": "bar"}
|
||||
nonEvictablePod.Labels = map[string]string{"foo": "bar"}
|
||||
test.SetNormalOwnerRef(p1)
|
||||
test.SetNormalOwnerRef(p2)
|
||||
@@ -83,6 +93,7 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
test.SetNormalOwnerRef(p7)
|
||||
test.SetNormalOwnerRef(p9)
|
||||
test.SetNormalOwnerRef(p10)
|
||||
test.SetNormalOwnerRef(p11)
|
||||
|
||||
// set pod anti affinity
|
||||
setPodAntiAffinity(p1, "foo", "bar")
|
||||
@@ -186,6 +197,13 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
expectedEvictedPodCount: 0,
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "Evict pod violating anti-affinity among different node (all pods have anti-affinity)",
|
||||
pods: []*v1.Pod{p1, p11},
|
||||
nodes: []*v1.Node{node1, node5},
|
||||
expectedEvictedPodCount: 1,
|
||||
nodeFit: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
const PluginName = "RemovePodsViolatingNodeAffinity"
|
||||
@@ -46,10 +47,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeAffinityArgs, got %T", args)
}

var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if nodeAffinityArgs.Namespaces != nil {
includedNamespaces = sets.NewString(nodeAffinityArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(nodeAffinityArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(nodeAffinityArgs.Namespaces.Include...)
excludedNamespaces = sets.New(nodeAffinityArgs.Namespaces.Exclude...)
}

// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -78,40 +79,68 @@ func (d *RemovePodsViolatingNodeAffinity) Name() string {
|
||||
func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
for _, nodeAffinity := range d.args.NodeAffinityType {
|
||||
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
|
||||
var err *frameworktypes.Status = nil
|
||||
|
||||
// The pods that we'll evict must be evictable. For example, the current number of replicas
|
||||
// must be greater than the pdb.minValue.
|
||||
// The pods must be able to get scheduled on a different node. Otherwise, it doesn't make much
|
||||
// sense to evict them.
|
||||
switch nodeAffinity {
|
||||
case "requiredDuringSchedulingIgnoredDuringExecution":
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
|
||||
pods, err := podutil.ListPodsOnANode(
|
||||
node.Name,
|
||||
d.handle.GetPodsAssignedToNodeFunc(),
|
||||
podutil.WrapFilterFuncs(d.podFilter, func(pod *v1.Pod) bool {
|
||||
return d.handle.Evictor().Filter(pod) &&
|
||||
!nodeutil.PodFitsCurrentNode(d.handle.GetPodsAssignedToNodeFunc(), pod, node) &&
|
||||
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes)
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing pods on a node: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
|
||||
d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
|
||||
if d.handle.Evictor().NodeLimitExceeded(node) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// In this specific case, the pod must also violate the nodeSelector to be evicted
|
||||
filterFunc := func(pod *v1.Pod, node *v1.Node, nodes []*v1.Node) bool {
|
||||
return utils.PodHasNodeAffinity(pod, utils.RequiredDuringSchedulingIgnoredDuringExecution) &&
|
||||
d.handle.Evictor().Filter(pod) &&
|
||||
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) &&
|
||||
!nodeutil.PodMatchNodeSelector(pod, node)
|
||||
}
|
||||
err = d.processNodes(ctx, nodes, filterFunc)
|
||||
case "preferredDuringSchedulingIgnoredDuringExecution":
|
||||
// In this specific case, the pod must have a better fit on another node than
|
||||
// in the current one based on the preferred node affinity
|
||||
filterFunc := func(pod *v1.Pod, node *v1.Node, nodes []*v1.Node) bool {
|
||||
return utils.PodHasNodeAffinity(pod, utils.PreferredDuringSchedulingIgnoredDuringExecution) &&
|
||||
d.handle.Evictor().Filter(pod) &&
|
||||
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) &&
|
||||
(nodeutil.GetBestNodeWeightGivenPodPreferredAffinity(pod, nodes) > nodeutil.GetNodeWeightGivenPodPreferredAffinity(pod, node))
|
||||
}
|
||||
err = d.processNodes(ctx, nodes, filterFunc)
|
||||
default:
|
||||
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, nodes []*v1.Node, filterFunc func(*v1.Pod, *v1.Node, []*v1.Node) bool) *frameworktypes.Status {
|
||||
for _, node := range nodes {
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
|
||||
// Potentially evictable pods
|
||||
pods, err := podutil.ListPodsOnANode(
|
||||
node.Name,
|
||||
d.handle.GetPodsAssignedToNodeFunc(),
|
||||
podutil.WrapFilterFuncs(d.podFilter, func(pod *v1.Pod) bool {
|
||||
return filterFunc(pod, node, nodes)
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing pods on a node: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
|
||||
d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
|
||||
if d.handle.Evictor().NodeLimitExceeded(node) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -48,26 +48,49 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
|
||||
unschedulableNodeWithLabels.Spec.Unschedulable = true
|
||||
|
||||
addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time) []*v1.Pod {
|
||||
addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time, affinityType string) []*v1.Pod {
|
||||
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
|
||||
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
NodeAffinity: &v1.NodeAffinity{},
|
||||
}
|
||||
|
||||
switch affinityType {
|
||||
case "requiredDuringSchedulingIgnoredDuringExecution":
|
||||
podWithNodeAffinity.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
case "preferredDuringSchedulingIgnoredDuringExecution":
|
||||
podWithNodeAffinity.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
Weight: 10,
|
||||
Preference: v1.NodeSelectorTerm{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
case "requiredDuringSchedulingRequiredDuringExecution":
|
||||
default:
|
||||
t.Fatalf("Invalid affinity type %s", affinityType)
|
||||
}
|
||||
|
||||
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
|
||||
@@ -77,6 +100,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
podWithNodeAffinity.DeletionTimestamp = deletionTimestamp
|
||||
pod1.DeletionTimestamp = deletionTimestamp
|
||||
pod2.DeletionTimestamp = deletionTimestamp
|
||||
|
||||
@@ -87,6 +111,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
var uint0 uint = 0
|
||||
var uint1 uint = 1
|
||||
tests := []struct {
|
||||
description string
|
||||
@@ -104,16 +129,25 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingRequiredDuringExecution"},
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil),
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingRequiredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
},
|
||||
{
|
||||
description: "Pod is correctly scheduled on node, no eviction expected",
|
||||
description: "Pod is correctly scheduled on node, no eviction expected [required affinity]",
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithLabels, nil),
|
||||
pods: addPodsToNode(nodeWithLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithLabels},
|
||||
},
|
||||
{
|
||||
description: "Pod is correctly scheduled on node, no eviction expected [preferred affinity]",
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithLabels},
|
||||
},
|
||||
{
|
||||
@@ -122,66 +156,176 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil),
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available with better fit, should be evicted",
|
||||
expectedEvictedPodCount: 1,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted [required affinity]",
|
||||
expectedEvictedPodCount: 1,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil),
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminting",
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted [preferred affinity]",
|
||||
expectedEvictedPodCount: 1,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should not be evicted",
|
||||
expectedEvictedPodCount: 1,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting",
|
||||
expectedEvictedPodCount: 1,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 0, should be not evicted [required affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil),
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxPodsToEvictPerNode: &uint0,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 0, should be not evicted [preferred affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxPodsToEvictPerNode: &uint0,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminating [required affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminating [preferred affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should be evicted [required affinity]",
|
||||
expectedEvictedPodCount: 1,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should be evicted [preferred affinity]",
|
||||
expectedEvictedPodCount: 1,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 0, should not be evicted [required affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint0,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 0, should not be evicted [preferred affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint0,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting [required affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting [preferred affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict [required affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
|
||||
nodefit: true,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict",
|
||||
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict [preferred affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
|
||||
nodefit: true,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, and unschedulable node where pod could fit is available, should not evict [required affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil),
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
nodefit: true,
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, and unschedulable node where pod could fit is available, should not evict [preferred affinity]",
|
||||
expectedEvictedPodCount: 0,
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
nodefit: true,
|
||||
|
||||
@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeTaintsArgs, got %T", args)
}

var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if nodeTaintsArgs.Namespaces != nil {
includedNamespaces = sets.NewString(nodeTaintsArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(nodeTaintsArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(nodeTaintsArgs.Namespaces.Include...)
excludedNamespaces = sets.New(nodeTaintsArgs.Namespaces.Exclude...)
}

// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -67,7 +67,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
}

excludedTaints := sets.NewString(nodeTaintsArgs.ExcludedTaints...)
excludedTaints := sets.New(nodeTaintsArgs.ExcludedTaints...)
excludeTaint := func(taint *v1.Taint) bool {
// Exclude taints by key *or* key=value
return excludedTaints.Has(taint.Key) || (taint.Value != "" && excludedTaints.Has(fmt.Sprintf("%s=%s", taint.Key, taint.Value)))
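Note: with the generic set, excludedTaints entries can be either a bare taint key or a key=value pair; the closure above matches on both forms. A standalone sketch of that matching (not part of the diff; the taint keys and values are made up):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	excludedTaints := sets.New("dedicated=special-user", "reserved")

	excludeTaint := func(taint *v1.Taint) bool {
		// Exclude taints by key *or* key=value, as in the plugin above.
		return excludedTaints.Has(taint.Key) ||
			(taint.Value != "" && excludedTaints.Has(fmt.Sprintf("%s=%s", taint.Key, taint.Value)))
	}

	fmt.Println(excludeTaint(&v1.Taint{Key: "reserved", Value: "gpu"}))           // true: key match
	fmt.Println(excludeTaint(&v1.Taint{Key: "dedicated", Value: "special-user"})) // true: key=value match
	fmt.Println(excludeTaint(&v1.Taint{Key: "dedicated", Value: "other-team"}))   // false
}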
@@ -14,7 +14,9 @@ limitations under the License.
package removepodsviolatingtopologyspreadconstraint

import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilpointer "k8s.io/utils/pointer"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -31,7 +33,10 @@ func SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(obj runtime.Obj
if args.LabelSelector == nil {
args.LabelSelector = nil
}
if !args.IncludeSoftConstraints {
args.IncludeSoftConstraints = false
if args.TopologyBalanceNodeFit == nil {
args.TopologyBalanceNodeFit = utilpointer.Bool(true)
}
if len(args.Constraints) == 0 {
args.Constraints = append(args.Constraints, v1.DoNotSchedule)
}
}
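Note: the defaulting above only fills in TopologyBalanceNodeFit when the field is nil, so an explicit false from the policy is preserved, and the plugin later reads the value with utilpointer.BoolDeref. A minimal sketch of that pointer-defaulting pattern (not part of the diff):

package main

import (
	"fmt"

	utilpointer "k8s.io/utils/pointer"
)

func main() {
	var unset *bool                          // field omitted in the policy
	explicitFalse := utilpointer.Bool(false) // field set to false by the user

	// BoolDeref returns the pointed-to value, or the default when the pointer is nil.
	fmt.Println(utilpointer.BoolDeref(unset, true))         // true  (defaulted)
	fmt.Println(utilpointer.BoolDeref(explicitFalse, true)) // false (respected)
}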
@@ -17,12 +17,24 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
var scheme *runtime.Scheme
|
||||
|
||||
func init() {
|
||||
scheme = runtime.NewScheme()
|
||||
scheme.AddTypeDefaultingFunc(&RemovePodsViolatingTopologySpreadConstraintArgs{}, func(obj interface{}) {
|
||||
SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(obj.(*RemovePodsViolatingTopologySpreadConstraintArgs))
|
||||
})
|
||||
utilruntime.Must(AddToScheme(scheme))
|
||||
}
|
||||
|
||||
func TestSetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -35,26 +47,54 @@ func TestSetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(t *testing.
|
||||
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Namespaces: nil,
|
||||
LabelSelector: nil,
|
||||
IncludeSoftConstraints: false,
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "RemovePodsViolatingTopologySpreadConstraintArgs with value",
|
||||
in: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Namespaces: &api.Namespaces{},
|
||||
LabelSelector: &metav1.LabelSelector{},
|
||||
IncludeSoftConstraints: true,
|
||||
Namespaces: &api.Namespaces{},
|
||||
LabelSelector: &metav1.LabelSelector{},
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
|
||||
},
|
||||
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Namespaces: &api.Namespaces{},
|
||||
LabelSelector: &metav1.LabelSelector{},
|
||||
IncludeSoftConstraints: true,
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "RemovePodsViolatingTopologySpreadConstraintArgs without TopologyBalanceNodeFit",
|
||||
in: &RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "RemovePodsViolatingTopologySpreadConstraintArgs with TopologyBalanceNodeFit=false",
|
||||
in: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(false),
|
||||
},
|
||||
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(false),
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "RemovePodsViolatingTopologySpreadConstraintArgs with nil constraints",
|
||||
in: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: nil,
|
||||
},
|
||||
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
scheme := runtime.NewScheme()
|
||||
utilruntime.Must(AddToScheme(scheme))
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
scheme.Default(tc.in)
|
||||
if diff := cmp.Diff(tc.in, tc.want); diff != "" {
|
||||
|
||||
@@ -26,7 +26,10 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog/v2"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
|
||||
v1helper "k8s.io/component-helpers/scheduling/corev1"
|
||||
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
@@ -47,6 +50,19 @@ type topology struct {
|
||||
pods []*v1.Pod
|
||||
}
|
||||
|
||||
// topologySpreadConstraint is an internal version for v1.TopologySpreadConstraint
|
||||
// and where the selector is parsed.
|
||||
// This mirrors scheduler: https://github.com/kubernetes/kubernetes/blob/release-1.28/pkg/scheduler/framework/plugins/podtopologyspread/common.go#L37
|
||||
type topologySpreadConstraint struct {
|
||||
maxSkew int32
|
||||
topologyKey string
|
||||
selector labels.Selector
|
||||
nodeAffinityPolicy v1.NodeInclusionPolicy
|
||||
nodeTaintsPolicy v1.NodeInclusionPolicy
|
||||
podNodeAffinity nodeaffinity.RequiredNodeAffinity
|
||||
podTolerations []v1.Toleration
|
||||
}
|
||||
|
||||
// RemovePodsViolatingTopologySpreadConstraint evicts pods which violate their topology spread constraints
|
||||
type RemovePodsViolatingTopologySpreadConstraint struct {
|
||||
handle frameworktypes.Handle
|
||||
@@ -102,50 +118,56 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
|
||||
// iterate through all topoPairs for this topologyKey and diff currentPods -minPods <=maxSkew
|
||||
// if diff > maxSkew, add this pod in the current bucket for eviction
|
||||
|
||||
// First record all of the constraints by namespace
|
||||
client := d.handle.ClientSet()
|
||||
namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Couldn't list namespaces")
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("list namespace: %w", err),
|
||||
}
|
||||
}
|
||||
klog.V(1).InfoS("Processing namespaces for topology spread constraints")
|
||||
klog.V(1).Info("Processing namespaces for topology spread constraints")
|
||||
podsForEviction := make(map[*v1.Pod]struct{})
|
||||
var includedNamespaces, excludedNamespaces sets.String
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if d.args.Namespaces != nil {
|
||||
includedNamespaces = sets.NewString(d.args.Namespaces.Include...)
|
||||
excludedNamespaces = sets.NewString(d.args.Namespaces.Exclude...)
|
||||
includedNamespaces = sets.New(d.args.Namespaces.Include...)
|
||||
excludedNamespaces = sets.New(d.args.Namespaces.Exclude...)
|
||||
}
|
||||
|
||||
// 1. for each namespace...
|
||||
for _, namespace := range namespaces.Items {
|
||||
if (len(includedNamespaces) > 0 && !includedNamespaces.Has(namespace.Name)) ||
|
||||
(len(excludedNamespaces) > 0 && excludedNamespaces.Has(namespace.Name)) {
|
||||
continue
|
||||
pods, err := podutil.ListPodsOnNodes(nodes, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error listing all pods: %v", err),
|
||||
}
|
||||
namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Couldn't list pods in namespace", "namespace", namespace)
|
||||
}
|
||||
|
||||
allowedConstraints := sets.New[v1.UnsatisfiableConstraintAction](d.args.Constraints...)
|
||||
|
||||
namespacedPods := podutil.GroupByNamespace(pods)
|
||||
|
||||
// 1. for each namespace...
|
||||
for namespace := range namespacedPods {
|
||||
klog.V(4).InfoS("Processing namespace for topology spread constraints", "namespace", namespace)
|
||||
|
||||
if (len(includedNamespaces) > 0 && !includedNamespaces.Has(namespace)) ||
|
||||
(len(excludedNamespaces) > 0 && excludedNamespaces.Has(namespace)) {
|
||||
continue
|
||||
}
|
||||
|
||||
// ...where there is a topology constraint
|
||||
namespaceTopologySpreadConstraints := []v1.TopologySpreadConstraint{}
|
||||
for _, pod := range namespacePods.Items {
|
||||
var namespaceTopologySpreadConstraints []topologySpreadConstraint
|
||||
for _, pod := range namespacedPods[namespace] {
|
||||
for _, constraint := range pod.Spec.TopologySpreadConstraints {
|
||||
// Ignore soft topology constraints if they are not included
|
||||
if constraint.WhenUnsatisfiable == v1.ScheduleAnyway && (d.args == nil || !d.args.IncludeSoftConstraints) {
|
||||
// Ignore topology constraints if they are not included
|
||||
if !allowedConstraints.Has(constraint.WhenUnsatisfiable) {
|
||||
continue
|
||||
}
|
||||
// Need to check v1.TopologySpreadConstraint deepEquality because
|
||||
// v1.TopologySpreadConstraint has pointer fields
|
||||
|
||||
namespaceTopologySpreadConstraint, err := newTopologySpreadConstraint(constraint, pod)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "cannot process topology spread constraint")
|
||||
continue
|
||||
}
|
||||
|
||||
// Need to check TopologySpreadConstraint deepEquality because
|
||||
// TopologySpreadConstraint can haves pointer fields
|
||||
// and we don't need to go over duplicated constraints later on
|
||||
if hasIdenticalConstraints(constraint, namespaceTopologySpreadConstraints) {
|
||||
if hasIdenticalConstraints(namespaceTopologySpreadConstraint, namespaceTopologySpreadConstraints) {
|
||||
continue
|
||||
}
|
||||
namespaceTopologySpreadConstraints = append(namespaceTopologySpreadConstraints, constraint)
|
||||
namespaceTopologySpreadConstraints = append(namespaceTopologySpreadConstraints, namespaceTopologySpreadConstraint)
|
||||
}
|
||||
}
|
||||
if len(namespaceTopologySpreadConstraints) == 0 {
|
||||
@@ -153,56 +175,52 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
|
||||
}
|
||||
|
||||
// 2. for each topologySpreadConstraint in that namespace
|
||||
for _, constraint := range namespaceTopologySpreadConstraints {
|
||||
for _, tsc := range namespaceTopologySpreadConstraints {
|
||||
constraintTopologies := make(map[topologyPair][]*v1.Pod)
|
||||
// pre-populate the topologyPair map with all the topologies available from the nodeMap
|
||||
// (we can't just build it from existing pods' nodes because a topology may have 0 pods)
|
||||
for _, node := range nodeMap {
|
||||
if val, ok := node.Labels[constraint.TopologyKey]; ok {
|
||||
constraintTopologies[topologyPair{key: constraint.TopologyKey, value: val}] = make([]*v1.Pod, 0)
|
||||
if val, ok := node.Labels[tsc.topologyKey]; ok {
|
||||
if matchNodeInclusionPolicies(tsc, node) {
|
||||
constraintTopologies[topologyPair{key: tsc.topologyKey, value: val}] = make([]*v1.Pod, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
selector, err := metav1.LabelSelectorAsSelector(constraint.LabelSelector)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Couldn't parse label selector as selector", "selector", constraint.LabelSelector)
|
||||
continue
|
||||
}
|
||||
|
||||
// 3. for each evictable pod in that namespace
|
||||
// (this loop is where we count the number of pods per topologyValue that match this constraint's selector)
|
||||
var sumPods float64
|
||||
for i := range namespacePods.Items {
|
||||
for _, pod := range namespacedPods[namespace] {
|
||||
// skip pods that are being deleted.
|
||||
if utils.IsPodTerminating(&namespacePods.Items[i]) {
|
||||
if utils.IsPodTerminating(pod) {
|
||||
continue
|
||||
}
|
||||
// 4. if the pod matches this TopologySpreadConstraint LabelSelector
|
||||
if !selector.Matches(labels.Set(namespacePods.Items[i].Labels)) {
|
||||
if !tsc.selector.Matches(labels.Set(pod.Labels)) {
|
||||
continue
|
||||
}
|
||||
|
||||
// 5. If the pod's node matches this constraint's topologyKey, create a topoPair and add the pod
|
||||
node, ok := nodeMap[namespacePods.Items[i].Spec.NodeName]
|
||||
node, ok := nodeMap[pod.Spec.NodeName]
|
||||
if !ok {
|
||||
// If ok is false, node is nil in which case node.Labels will panic. In which case a pod is yet to be scheduled. So it's safe to just continue here.
|
||||
continue
|
||||
}
|
||||
nodeValue, ok := node.Labels[constraint.TopologyKey]
|
||||
nodeValue, ok := node.Labels[tsc.topologyKey]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
// 6. create a topoPair with key as this TopologySpreadConstraint
|
||||
topoPair := topologyPair{key: constraint.TopologyKey, value: nodeValue}
|
||||
topoPair := topologyPair{key: tsc.topologyKey, value: nodeValue}
|
||||
// 7. add the pod with key as this topoPair
|
||||
constraintTopologies[topoPair] = append(constraintTopologies[topoPair], &namespacePods.Items[i])
|
||||
constraintTopologies[topoPair] = append(constraintTopologies[topoPair], pod)
|
||||
sumPods++
|
||||
}
|
||||
if topologyIsBalanced(constraintTopologies, constraint) {
|
||||
klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
|
||||
if topologyIsBalanced(constraintTopologies, tsc) {
|
||||
klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", tsc)
|
||||
continue
|
||||
}
|
||||
balanceDomains(d.handle.GetPodsAssignedToNodeFunc(), podsForEviction, constraint, constraintTopologies, sumPods, d.handle.Evictor().Filter, nodes)
|
||||
d.balanceDomains(podsForEviction, tsc, constraintTopologies, sumPods, nodes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -227,7 +245,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
|
||||
}
|
||||
|
||||
// hasIdenticalConstraints checks if we already had an identical TopologySpreadConstraint in namespaceTopologySpreadConstraints slice
|
||||
func hasIdenticalConstraints(newConstraint v1.TopologySpreadConstraint, namespaceTopologySpreadConstraints []v1.TopologySpreadConstraint) bool {
|
||||
func hasIdenticalConstraints(newConstraint topologySpreadConstraint, namespaceTopologySpreadConstraints []topologySpreadConstraint) bool {
|
||||
for _, constraint := range namespaceTopologySpreadConstraints {
|
||||
if reflect.DeepEqual(newConstraint, constraint) {
|
||||
return true
|
||||
@@ -238,7 +256,7 @@ func hasIdenticalConstraints(newConstraint v1.TopologySpreadConstraint, namespac
|
||||
|
||||
// topologyIsBalanced checks if any domains in the topology differ by more than the MaxSkew
|
||||
// this is called before any sorting or other calculations and is used to skip topologies that don't need to be balanced
|
||||
func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.TopologySpreadConstraint) bool {
|
||||
func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, tsc topologySpreadConstraint) bool {
|
||||
minDomainSize := math.MaxInt32
|
||||
maxDomainSize := math.MinInt32
|
||||
for _, pods := range topology {
|
||||
@@ -248,7 +266,7 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.Topol
|
||||
if len(pods) > maxDomainSize {
|
||||
maxDomainSize = len(pods)
|
||||
}
|
||||
if int32(maxDomainSize-minDomainSize) > constraint.MaxSkew {
|
||||
if int32(maxDomainSize-minDomainSize) > tsc.maxSkew {
|
||||
return false
|
||||
}
|
||||
}
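To make the balance check concrete, here is a standalone sketch of just the skew arithmetic (not the plugin's API): domain sizes [3,1] with maxSkew=1 fail the check because 3-1=2 exceeds the allowed skew, while [2,2] passes.

package main

import "fmt"

// balanced reports whether no two domains differ by more than maxSkew.
func balanced(domainSizes []int, maxSkew int) bool {
	minSize, maxSize := domainSizes[0], domainSizes[0]
	for _, s := range domainSizes {
		if s < minSize {
			minSize = s
		}
		if s > maxSize {
			maxSize = s
		}
	}
	return maxSize-minSize <= maxSkew
}

func main() {
	fmt.Println(balanced([]int{3, 1}, 1)) // false: skew 2 exceeds maxSkew 1
	fmt.Println(balanced([]int{2, 2}, 1)) // true: already balanced
}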
|
||||
@@ -275,19 +293,21 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.Topol
|
||||
// Following this, the above topology domains end up "sorted" as:
|
||||
// [5, 5, 5, 5, 5, 5]
|
||||
// (assuming even distribution by the scheduler of the evicted pods)
|
||||
func balanceDomains(
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
|
||||
podsForEviction map[*v1.Pod]struct{},
|
||||
constraint v1.TopologySpreadConstraint,
|
||||
tsc topologySpreadConstraint,
|
||||
constraintTopologies map[topologyPair][]*v1.Pod,
|
||||
sumPods float64,
|
||||
isEvictable func(pod *v1.Pod) bool,
|
||||
nodes []*v1.Node,
|
||||
) {
|
||||
idealAvg := sumPods / float64(len(constraintTopologies))
|
||||
isEvictable := d.handle.Evictor().Filter
|
||||
sortedDomains := sortDomains(constraintTopologies, isEvictable)
|
||||
getPodsAssignedToNode := d.handle.GetPodsAssignedToNodeFunc()
|
||||
topologyBalanceNodeFit := utilpointer.BoolDeref(d.args.TopologyBalanceNodeFit, true)
|
||||
|
||||
nodesBelowIdealAvg := filterNodesBelowIdealAvg(nodes, sortedDomains, constraint.TopologyKey, idealAvg)
|
||||
eligibleNodes := filterEligibleNodes(nodes, tsc)
|
||||
nodesBelowIdealAvg := filterNodesBelowIdealAvg(eligibleNodes, sortedDomains, tsc.topologyKey, idealAvg)
|
||||
|
||||
// i is the index for belowOrEqualAvg
|
||||
// j is the index for aboveAvg
|
||||
@@ -303,7 +323,7 @@ func balanceDomains(
|
||||
skew := float64(len(sortedDomains[j].pods) - len(sortedDomains[i].pods))
|
||||
|
||||
// if i and j are within the maxSkew of each other, move to the next belowOrEqualAvg
|
||||
if int32(skew) <= constraint.MaxSkew {
|
||||
if int32(skew) <= tsc.maxSkew {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
@@ -317,7 +337,7 @@ func balanceDomains(
|
||||
aboveAvg := math.Ceil(float64(len(sortedDomains[j].pods)) - idealAvg)
|
||||
belowAvg := math.Ceil(idealAvg - float64(len(sortedDomains[i].pods)))
|
||||
smallestDiff := math.Min(aboveAvg, belowAvg)
|
||||
halfSkew := math.Ceil((skew - float64(constraint.MaxSkew)) / 2)
|
||||
halfSkew := math.Ceil((skew - float64(tsc.maxSkew)) / 2)
|
||||
movePods := int(math.Min(smallestDiff, halfSkew))
|
||||
if movePods <= 0 {
|
||||
i++
|
||||
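To make the numbers concrete, here is a standalone sketch of the single-step arithmetic above (it mirrors only the formulas shown, not the surrounding loop or eviction logic): with 8 pods in the most crowded domain, 0 in the least, idealAvg 5 and maxSkew 1, at most 3 pods are selected in that step.

package main

import (
	"fmt"
	"math"
)

func main() {
	above, below := 8.0, 0.0 // pod counts in the most and least crowded domains
	idealAvg := 5.0          // e.g. 15 matching pods spread over 3 domains
	maxSkew := 1.0

	skew := above - below                             // 8
	aboveAvg := math.Ceil(above - idealAvg)           // 3
	belowAvg := math.Ceil(idealAvg - below)           // 5
	smallestDiff := math.Min(aboveAvg, belowAvg)      // 3
	halfSkew := math.Ceil((skew - maxSkew) / 2)       // ceil(3.5) = 4
	movePods := int(math.Min(smallestDiff, halfSkew)) // 3

	fmt.Println(movePods)
}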
@@ -340,7 +360,7 @@ func balanceDomains(
|
||||
// This is because the chosen pods aren't sorted, but immovable pods still count as "evicted" toward the PTS algorithm.
|
||||
// So, a better selection heuristic could improve performance.
|
||||
|
||||
if !node.PodFitsAnyOtherNode(getPodsAssignedToNode, aboveToEvict[k], nodesBelowIdealAvg) {
|
||||
if topologyBalanceNodeFit && !node.PodFitsAnyOtherNode(getPodsAssignedToNode, aboveToEvict[k], nodesBelowIdealAvg) {
|
||||
klog.V(2).InfoS("ignoring pod for eviction as it does not fit on any other node", "pod", klog.KObj(aboveToEvict[k]))
|
||||
continue
|
||||
}
|
||||
@@ -434,3 +454,92 @@ func comparePodsByPriority(iPod, jPod *v1.Pod) bool {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// doNotScheduleTaintsFilterFunc returns the filter function that can
|
||||
// filter out the node taints that reject scheduling Pod on a Node.
|
||||
func doNotScheduleTaintsFilterFunc() func(t *v1.Taint) bool {
|
||||
return func(t *v1.Taint) bool {
|
||||
// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
|
||||
return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
|
||||
}
|
||||
}
|
||||
|
||||
func filterEligibleNodes(nodes []*v1.Node, tsc topologySpreadConstraint) []*v1.Node {
|
||||
var eligibleNodes []*v1.Node
|
||||
for _, node := range nodes {
|
||||
if matchNodeInclusionPolicies(tsc, node) {
|
||||
eligibleNodes = append(eligibleNodes, node)
|
||||
}
|
||||
}
|
||||
return eligibleNodes
|
||||
}
|
||||
|
||||
func matchNodeInclusionPolicies(tsc topologySpreadConstraint, node *v1.Node) bool {
|
||||
if tsc.nodeAffinityPolicy == v1.NodeInclusionPolicyHonor {
|
||||
// We ignore parsing errors here for backwards compatibility.
|
||||
if match, _ := tsc.podNodeAffinity.Match(node); !match {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if tsc.nodeTaintsPolicy == v1.NodeInclusionPolicyHonor {
|
||||
if _, untolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, tsc.podTolerations, doNotScheduleTaintsFilterFunc()); untolerated {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
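A minimal sketch of how the two helpers above interact, written as if it lived in this package (it uses the unexported types and functions, and the pod/node fixtures are hypothetical): with both inclusion policies set to Honor, a node carrying an untolerated NoSchedule taint drops out of the eligible set.

func sketchNodeInclusionFiltering() int {
	pod := &v1.Pod{} // no tolerations, no required node affinity
	tsc := topologySpreadConstraint{
		topologyKey:        "zone",
		nodeAffinityPolicy: v1.NodeInclusionPolicyHonor,
		nodeTaintsPolicy:   v1.NodeInclusionPolicyHonor,
		podNodeAffinity:    nodeaffinity.GetRequiredNodeAffinity(pod),
		podTolerations:     pod.Spec.Tolerations,
	}
	tainted := &v1.Node{Spec: v1.NodeSpec{Taints: []v1.Taint{{Key: "dedicated", Effect: v1.TaintEffectNoSchedule}}}}
	untainted := &v1.Node{}
	// Only the untainted node survives the inclusion-policy filter, so this returns 1.
	return len(filterEligibleNodes([]*v1.Node{tainted, untainted}, tsc))
}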
|
||||
|
||||
// inspired by Scheduler: https://github.com/kubernetes/kubernetes/blob/release-1.28/pkg/scheduler/framework/plugins/podtopologyspread/common.go#L90
|
||||
func newTopologySpreadConstraint(constraint v1.TopologySpreadConstraint, pod *v1.Pod) (topologySpreadConstraint, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(constraint.LabelSelector)
|
||||
if err != nil {
|
||||
return topologySpreadConstraint{}, err
|
||||
}
|
||||
|
||||
if len(constraint.MatchLabelKeys) > 0 && pod.Labels != nil {
|
||||
matchLabels := make(labels.Set)
|
||||
for _, labelKey := range constraint.MatchLabelKeys {
|
||||
if value, ok := pod.Labels[labelKey]; ok {
|
||||
matchLabels[labelKey] = value
|
||||
}
|
||||
}
|
||||
if len(matchLabels) > 0 {
|
||||
selector = mergeLabelSetWithSelector(matchLabels, selector)
|
||||
}
|
||||
}
|
||||
|
||||
tsc := topologySpreadConstraint{
|
||||
maxSkew: constraint.MaxSkew,
|
||||
topologyKey: constraint.TopologyKey,
|
||||
selector: selector,
|
||||
nodeAffinityPolicy: v1.NodeInclusionPolicyHonor, // If NodeAffinityPolicy is nil, we treat NodeAffinityPolicy as "Honor".
|
||||
nodeTaintsPolicy: v1.NodeInclusionPolicyIgnore, // If NodeTaintsPolicy is nil, we treat NodeTaintsPolicy as "Ignore".
|
||||
podNodeAffinity: nodeaffinity.GetRequiredNodeAffinity(pod),
|
||||
podTolerations: pod.Spec.Tolerations,
|
||||
}
|
||||
if constraint.NodeAffinityPolicy != nil {
|
||||
tsc.nodeAffinityPolicy = *constraint.NodeAffinityPolicy
|
||||
}
|
||||
if constraint.NodeTaintsPolicy != nil {
|
||||
tsc.nodeTaintsPolicy = *constraint.NodeTaintsPolicy
|
||||
}
|
||||
|
||||
return tsc, nil
|
||||
}
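A short usage sketch for the constructor above, again written as in-package code (the pod and constraint fixtures are hypothetical): the pod's value for each matchLabelKeys key is folded into the selector, so only siblings from the same ReplicaSet are counted.

func sketchNewTopologySpreadConstraint() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{
		"app":               "web",
		"pod-template-hash": "abc123",
	}}}
	declared := v1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: v1.DoNotSchedule,
		LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		MatchLabelKeys:    []string{"pod-template-hash"},
	}
	tsc, err := newTopologySpreadConstraint(declared, pod)
	if err != nil {
		return // the only error path is an unparsable label selector
	}
	// The merged selector now requires both labels, e.g. "app=web,pod-template-hash=abc123".
	_ = tsc.selector.String()
}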
|
||||
|
||||
// Scheduler: https://github.com/kubernetes/kubernetes/blob/release-1.28/pkg/scheduler/framework/plugins/podtopologyspread/common.go#L136
|
||||
func mergeLabelSetWithSelector(matchLabels labels.Set, s labels.Selector) labels.Selector {
|
||||
mergedSelector := labels.SelectorFromSet(matchLabels)
|
||||
|
||||
requirements, ok := s.Requirements()
|
||||
if !ok {
|
||||
return s
|
||||
}
|
||||
|
||||
for _, r := range requirements {
|
||||
mergedSelector = mergedSelector.Add(r)
|
||||
}
|
||||
|
||||
return mergedSelector
|
||||
}
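And the merge helper in isolation, as an in-package sketch using only k8s.io/apimachinery/pkg/labels:

func sketchMergeSelector() {
	base, _ := labels.Parse("app=web")
	merged := mergeLabelSetWithSelector(labels.Set{"pod-template-hash": "abc123"}, base)
	// merged now carries both requirements, e.g. "app=web,pod-template-hash=abc123"
	_ = merged.String()
}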
|
||||
|
||||
@@ -3,8 +3,10 @@ package removepodsviolatingtopologyspreadconstraint
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -14,6 +16,7 @@ import (
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/events"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
@@ -98,6 +101,43 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2] (both constraints)",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.ScheduleAnyway,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
count: 2,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2] (soft constraints)",
|
||||
nodes: []*v1.Node{
|
||||
@@ -131,7 +171,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{IncludeSoftConstraints: true},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [3,1], maxSkew=1, no pods eligible, move 0 pods",
|
||||
@@ -262,12 +304,14 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"zone": "zoneA"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
nodeAffinity: &v1.Affinity{NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{MatchExpressions: []v1.NodeSelectorRequirement{{Key: "foo", Values: []string{"bar"}, Operator: v1.NodeSelectorOpIn}}},
|
||||
@@ -275,9 +319,10 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
count: 1,
|
||||
node: "n1",
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
@@ -285,6 +330,163 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
name: "6 domains, sizes [0,0,0,3,3,2], maxSkew=1, No pod is evicted since topology is balanced",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
|
||||
|
||||
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 3,
|
||||
node: "n4",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
node: "n5",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 2,
|
||||
node: "n6",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: false,
|
||||
},
|
||||
{
|
||||
name: "7 domains, sizes [0,0,0,3,3,2], maxSkew=1, two pods are evicted and moved to new node added",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
|
||||
|
||||
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n7", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeD"; n.Labels["os"] = "windows" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 3,
|
||||
node: "n4",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
node: "n5",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 2,
|
||||
node: "n6",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 2,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: false,
|
||||
},
|
||||
{
|
||||
name: "6 domains, sizes [0,0,0,4,3,1], maxSkew=1, 1 pod is evicted from node 4",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
|
||||
|
||||
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 4,
|
||||
node: "n4",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
node: "n5",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n6",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: false,
|
||||
},
|
||||
{
|
||||
name: "6 domains, sizes [0,0,0,4,3,1], maxSkew=1, 0 pod is evicted as pods of node:winNodeA can only be scheduled on this node and nodeFit is true ",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeA"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeB"; n.Labels["os"] = "linux" }),
|
||||
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "linNodeC"; n.Labels["os"] = "linux" }),
|
||||
|
||||
test.BuildTestNode("n4", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeA"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n5", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeB"; n.Labels["os"] = "windows" }),
|
||||
test.BuildTestNode("n6", 2000, 3000, 10, func(n *v1.Node) { n.Labels["node"] = "winNodeC"; n.Labels["os"] = "windows" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 4,
|
||||
node: "n4",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"node": "winNodeA"},
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
node: "n5",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n6",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultNodeTopologyConstraints(1),
|
||||
nodeSelector: map[string]string{"os": "windows"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [4,0], maxSkew=1, move 2 pods since selector matches multiple nodes",
|
||||
nodes: []*v1.Node{
|
||||
@@ -587,7 +789,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{IncludeSoftConstraints: true},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "3 domains size [8 7 0], maxSkew=1, should move 5 to get [5 5 5]",
|
||||
@@ -611,7 +815,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 5,
|
||||
expectedEvictedPods: []string{"pod-5", "pod-6", "pod-7", "pod-8"},
|
||||
expectedEvictedPods: []string{"pod-5", "pod-6", "pod-7", "pod-8", "pod-9"},
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
},
|
||||
@@ -860,6 +1064,55 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pods given matchLabelKeys on same replicaset",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
|
||||
constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
|
||||
constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
expectedEvictedPods: []string{"pod-1"},
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods given matchLabelKeys on two different replicasets",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "foo"},
|
||||
constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar", appsv1.DefaultDeploymentUniqueLabelKey: "bar"},
|
||||
constraints: getDefaultTopologyConstraintsWithPodTemplateHashMatch(1),
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{LabelSelector: getLabelSelector("foo", []string{"baz"}, metav1.LabelSelectorOpNotIn)},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved",
|
||||
nodes: []*v1.Node{
|
||||
@@ -923,6 +1176,38 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
name: "3 domains, sizes [2,3,4], maxSkew=1, args.NodeFit is false, and not enough cpu on zoneA; 1 should be moved to force scale-up",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 250, 2000, 9, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 1000, 2000, 9, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
test.BuildTestNode("n3", 1000, 2000, 9, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 2,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 3,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 4,
|
||||
node: "n3",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 1,
|
||||
namespaces: []string{"ns1"},
|
||||
args: RemovePodsViolatingTopologySpreadConstraintArgs{TopologyBalanceNodeFit: utilpointer.Bool(false)},
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
name: "3 domains, sizes [[1,0], [1,1], [2,1]], maxSkew=1, NodeFit is enabled, and not enough cpu on ZoneA; nothing should be moved",
|
||||
nodes: []*v1.Node{
|
||||
@@ -1137,11 +1422,21 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
// workaround to ensure that pods are returned sorted so 'expectedEvictedPods' would work consistently
|
||||
getPodsAssignedToNode := func(s string, filterFunc podutil.FilterFunc) ([]*v1.Pod, error) {
|
||||
pods, err := podsAssignedToNode(s, filterFunc)
|
||||
sort.Slice(pods, func(i, j int) bool {
|
||||
return pods[i].Name < pods[j].Name
|
||||
})
|
||||
|
||||
return pods, err
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
@@ -1200,6 +1495,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(&tc.args)
|
||||
|
||||
plugin, err := New(
|
||||
&tc.args,
|
||||
handle,
|
||||
@@ -1215,12 +1512,12 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}
|
||||
|
||||
if tc.expectedEvictedPods != nil {
|
||||
diff := sets.NewString(tc.expectedEvictedPods...).Difference(sets.NewString(evictedPods...))
|
||||
diff := sets.New(tc.expectedEvictedPods...).Difference(sets.New(evictedPods...))
|
||||
if diff.Len() > 0 {
|
||||
t.Errorf(
|
||||
"Expected pods %v to be evicted but %v were not evicted. Actual pods evicted: %v",
|
||||
tc.expectedEvictedPods,
|
||||
diff.List(),
|
||||
diff.UnsortedList(),
|
||||
evictedPods,
|
||||
)
|
||||
}
|
||||
@@ -1284,32 +1581,53 @@ func getDefaultTopologyConstraints(maxSkew int32) []v1.TopologySpreadConstraint
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckIdenticalConstraints(t *testing.T) {
|
||||
newConstraintSame := v1.TopologySpreadConstraint{
|
||||
MaxSkew: 2,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
}
|
||||
newConstraintDifferent := v1.TopologySpreadConstraint{
|
||||
MaxSkew: 3,
|
||||
TopologyKey: "node",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
}
|
||||
namespaceTopologySpreadConstraint := []v1.TopologySpreadConstraint{}
|
||||
namespaceTopologySpreadConstraint = []v1.TopologySpreadConstraint{
|
||||
func getDefaultNodeTopologyConstraints(maxSkew int32) []v1.TopologySpreadConstraint {
|
||||
return []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: 2,
|
||||
TopologyKey: "zone",
|
||||
MaxSkew: maxSkew,
|
||||
TopologyKey: "node",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getDefaultTopologyConstraintsWithPodTemplateHashMatch(maxSkew int32) []v1.TopologySpreadConstraint {
|
||||
return []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: maxSkew,
|
||||
TopologyKey: "zone",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
MatchLabelKeys: []string{appsv1.DefaultDeploymentUniqueLabelKey},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckIdenticalConstraints(t *testing.T) {
|
||||
selector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}})
|
||||
|
||||
newConstraintSame := topologySpreadConstraint{
|
||||
maxSkew: 2,
|
||||
topologyKey: "zone",
|
||||
selector: selector.DeepCopySelector(),
|
||||
}
|
||||
newConstraintDifferent := topologySpreadConstraint{
|
||||
maxSkew: 3,
|
||||
topologyKey: "node",
|
||||
selector: selector.DeepCopySelector(),
|
||||
}
|
||||
namespaceTopologySpreadConstraint := []topologySpreadConstraint{
|
||||
{
|
||||
maxSkew: 2,
|
||||
topologyKey: "zone",
|
||||
selector: selector.DeepCopySelector(),
|
||||
},
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
namespaceTopologySpreadConstraints []v1.TopologySpreadConstraint
|
||||
newConstraint v1.TopologySpreadConstraint
|
||||
namespaceTopologySpreadConstraints []topologySpreadConstraint
|
||||
newConstraint topologySpreadConstraint
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
|
||||
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package removepodsviolatingtopologyspreadconstraint
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
@@ -28,7 +29,8 @@ import (
|
||||
type RemovePodsViolatingTopologySpreadConstraintArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
Namespaces *api.Namespaces `json:"namespaces"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
|
||||
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
|
||||
Namespaces *api.Namespaces `json:"namespaces"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
|
||||
Constraints []v1.UnsatisfiableConstraintAction `json:"constraints"`
|
||||
TopologyBalanceNodeFit *bool `json:"topologyBalanceNodeFit"`
|
||||
}
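A minimal sketch of populating the new fields from Go (the values are illustrative only; utilpointer is the k8s.io/utils/pointer alias already used in the tests above):

func sketchArgs() *RemovePodsViolatingTopologySpreadConstraintArgs {
	return &RemovePodsViolatingTopologySpreadConstraintArgs{
		// Consider both hard and soft constraints, replacing IncludeSoftConstraints.
		Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
		// Skip the node-fit check when balancing domains.
		TopologyBalanceNodeFit: utilpointer.Bool(false),
	}
}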
|
||||
|
||||
@@ -19,23 +19,37 @@ package removepodsviolatingtopologyspreadconstraint
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
// ValidateRemovePodsViolatingTopologySpreadConstraintArgs validates RemovePodsViolatingTopologySpreadConstraint arguments
|
||||
func ValidateRemovePodsViolatingTopologySpreadConstraintArgs(obj runtime.Object) error {
|
||||
var errs []error
|
||||
|
||||
args := obj.(*RemovePodsViolatingTopologySpreadConstraintArgs)
|
||||
// At most one of include/exclude can be set
|
||||
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
|
||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||
errs = append(errs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
|
||||
}
|
||||
|
||||
if args.LabelSelector != nil {
|
||||
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
|
||||
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
|
||||
errs = append(errs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
if len(args.Constraints) > 0 {
|
||||
supportedConstraints := sets.New(v1.DoNotSchedule, v1.ScheduleAnyway)
|
||||
for _, constraint := range args.Constraints {
|
||||
if !supportedConstraints.Has(constraint) {
|
||||
errs = append(errs, fmt.Errorf("constraint %s is not one of %v", constraint, supportedConstraints))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return errors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,85 @@
|
||||
package removepodsviolatingtopologyspreadconstraint
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func TestValidateRemovePodsViolatingTopologySpreadConstraintArgs(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
args *RemovePodsViolatingTopologySpreadConstraintArgs
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
description: "valid namespace args, no errors",
|
||||
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Namespaces: &api.Namespaces{
|
||||
Include: []string{"default"},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "invalid namespaces args, expects error",
|
||||
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Namespaces: &api.Namespaces{
|
||||
Include: []string{"default"},
|
||||
Exclude: []string{"kube-system"},
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
description: "valid label selector args, no errors",
|
||||
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "invalid label selector args, expects errors",
|
||||
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
description: "valid constraints args, no errors",
|
||||
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "invalid constraints args, expects errors",
|
||||
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{"foo"},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidateRemovePodsViolatingTopologySpreadConstraintArgs(tc.args)
|
||||
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected arg validation behavior")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -22,6 +22,7 @@ limitations under the License.
|
||||
package removepodsviolatingtopologyspreadconstraint
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
api "sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -41,6 +42,16 @@ func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyInto(out *Rem
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Constraints != nil {
|
||||
in, out := &in.Constraints, &out.Constraints
|
||||
*out = make([]corev1.UnsatisfiableConstraintAction, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.TopologyBalanceNodeFit != nil {
|
||||
in, out := &in.TopologyBalanceNodeFit, &out.TopologyBalanceNodeFit
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -18,12 +18,15 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
@@ -113,10 +116,10 @@ type profileImpl struct {
|
||||
preEvictionFilterPlugins []preEvictionFilterPlugin
|
||||
|
||||
// Each extension point with a list of plugins implementing the extension point.
|
||||
deschedule sets.String
|
||||
balance sets.String
|
||||
filter sets.String
|
||||
preEvictionFilter sets.String
|
||||
deschedule sets.Set[string]
|
||||
balance sets.Set[string]
|
||||
filter sets.Set[string]
|
||||
preEvictionFilter sets.Set[string]
|
||||
}
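A minimal sketch of the generic set API this migration moves to (k8s.io/apimachinery/pkg/util/sets), shown with hypothetical plugin names:

func sketchGenericSets() {
	enabled := sets.New[string]("DefaultEvictor", "RemoveDuplicates")
	enabled.Insert("RemoveFailedPods")
	_ = enabled.Has("DefaultEvictor") // true
	_ = enabled.UnsortedList()        // order not guaranteed, unlike the old sets.String.List()
	_ = sets.List(enabled)            // sorted slice, the closest drop-in for List()
}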
|
||||
|
||||
// Option for the handleImpl.
|
||||
@@ -184,10 +187,10 @@ func buildPlugin(config api.DeschedulerProfile, pluginName string, handle *handl
|
||||
}
|
||||
|
||||
func (p *profileImpl) registryToExtensionPoints(registry pluginregistry.Registry) {
|
||||
p.deschedule = sets.NewString()
|
||||
p.balance = sets.NewString()
|
||||
p.filter = sets.NewString()
|
||||
p.preEvictionFilter = sets.NewString()
|
||||
p.deschedule = sets.New[string]()
|
||||
p.balance = sets.New[string]()
|
||||
p.filter = sets.New[string]()
|
||||
p.preEvictionFilter = sets.New[string]()
|
||||
|
||||
for plugin, pluginUtilities := range registry {
|
||||
if _, ok := pluginUtilities.PluginType.(frameworktypes.DeschedulePlugin); ok {
|
||||
@@ -232,16 +235,16 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
|
||||
pi.registryToExtensionPoints(reg)
|
||||
|
||||
if !pi.deschedule.HasAll(config.Plugins.Deschedule.Enabled...) {
|
||||
return nil, fmt.Errorf("profile %q configures deschedule extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.Deschedule.Enabled...).Difference(pi.deschedule))
|
||||
return nil, fmt.Errorf("profile %q configures deschedule extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.Deschedule.Enabled...).Difference(pi.deschedule))
|
||||
}
|
||||
if !pi.balance.HasAll(config.Plugins.Balance.Enabled...) {
|
||||
return nil, fmt.Errorf("profile %q configures balance extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.Balance.Enabled...).Difference(pi.balance))
|
||||
return nil, fmt.Errorf("profile %q configures balance extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.Balance.Enabled...).Difference(pi.balance))
|
||||
}
|
||||
if !pi.filter.HasAll(config.Plugins.Filter.Enabled...) {
|
||||
return nil, fmt.Errorf("profile %q configures filter extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.Filter.Enabled...).Difference(pi.filter))
|
||||
return nil, fmt.Errorf("profile %q configures filter extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.Filter.Enabled...).Difference(pi.filter))
|
||||
}
|
||||
if !pi.preEvictionFilter.HasAll(config.Plugins.PreEvictionFilter.Enabled...) {
|
||||
return nil, fmt.Errorf("profile %q configures preEvictionFilter extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.PreEvictionFilter.Enabled...).Difference(pi.preEvictionFilter))
|
||||
return nil, fmt.Errorf("profile %q configures preEvictionFilter extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.PreEvictionFilter.Enabled...).Difference(pi.preEvictionFilter))
|
||||
}
|
||||
|
||||
handle := &handleImpl{
|
||||
@@ -258,7 +261,7 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
|
||||
pluginNames = append(pluginNames, config.Plugins.PreEvictionFilter.Enabled...)
|
||||
|
||||
plugins := make(map[string]frameworktypes.Plugin)
|
||||
for _, plugin := range sets.NewString(pluginNames...).List() {
|
||||
for _, plugin := range sets.New(pluginNames...).UnsortedList() {
|
||||
pg, err := buildPlugin(config, plugin, handle, reg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to build %v plugin: %v", plugin, err)
|
||||
@@ -300,6 +303,9 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
|
||||
func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
errs := []error{}
|
||||
for _, pl := range d.deschedulePlugins {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, pl.Name(), trace.WithAttributes(attribute.String("plugin", pl.Name()), attribute.String("profile", d.profileName), attribute.String("operation", tracing.DescheduleOperation)))
|
||||
defer span.End()
|
||||
evicted := d.podEvictor.TotalEvicted()
|
||||
// TODO: strategyName should be accessible from within the strategy using a framework
|
||||
// handle or function which the Evictor has access to. For migration/in-progress framework
|
||||
@@ -311,6 +317,7 @@ func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node)
|
||||
metrics.DeschedulerStrategyDuration.With(map[string]string{"strategy": pl.Name(), "profile": d.profileName}).Observe(time.Since(strategyStart).Seconds())
|
||||
|
||||
if status != nil && status.Err != nil {
|
||||
span.AddEvent("Plugin Execution Failed", trace.WithAttributes(attribute.String("err", status.Err.Error())))
|
||||
errs = append(errs, fmt.Errorf("plugin %q finished with error: %v", pl.Name(), status.Err))
|
||||
}
|
||||
klog.V(1).InfoS("Total number of pods evicted", "extension point", "Deschedule", "evictedPods", d.podEvictor.TotalEvicted()-evicted)
|
||||
@@ -329,6 +336,9 @@ func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node)
|
||||
func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
errs := []error{}
|
||||
for _, pl := range d.balancePlugins {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, pl.Name(), trace.WithAttributes(attribute.String("plugin", pl.Name()), attribute.String("profile", d.profileName), attribute.String("operation", tracing.BalanceOperation)))
|
||||
defer span.End()
|
||||
evicted := d.podEvictor.TotalEvicted()
|
||||
// TODO: strategyName should be accessible from within the strategy using a framework
|
||||
// handle or function which the Evictor has access to. For migration/in-progress framework
|
||||
@@ -340,6 +350,7 @@ func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *f
|
||||
metrics.DeschedulerStrategyDuration.With(map[string]string{"strategy": pl.Name(), "profile": d.profileName}).Observe(time.Since(strategyStart).Seconds())
|
||||
|
||||
if status != nil && status.Err != nil {
|
||||
span.AddEvent("Plugin Execution Failed", trace.WithAttributes(attribute.String("err", status.Err.Error())))
|
||||
errs = append(errs, fmt.Errorf("plugin %q finished with error: %v", pl.Name(), status.Err))
|
||||
}
|
||||
klog.V(1).InfoS("Total number of pods evicted", "extension point", "Balance", "evictedPods", d.podEvictor.TotalEvicted()-evicted)
|
||||
|
||||
@@ -444,19 +444,19 @@ func TestProfileExtensionPoints(t *testing.T) {
|
||||
|
||||
// Validate the extension points of all registered plugins are properly detected
|
||||
|
||||
diff := cmp.Diff(sets.NewString("DeschedulePlugin_0", "DeschedulePlugin_1", "DeschedulePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.deschedule)
|
||||
diff := cmp.Diff(sets.New("DeschedulePlugin_0", "DeschedulePlugin_1", "DeschedulePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.deschedule)
|
||||
if diff != "" {
|
||||
t.Errorf("check for deschedule failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
diff = cmp.Diff(sets.NewString("BalancePlugin_0", "BalancePlugin_1", "BalancePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.balance)
|
||||
diff = cmp.Diff(sets.New("BalancePlugin_0", "BalancePlugin_1", "BalancePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.balance)
|
||||
if diff != "" {
|
||||
t.Errorf("check for balance failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
diff = cmp.Diff(sets.NewString("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.filter)
|
||||
diff = cmp.Diff(sets.New("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.filter)
|
||||
if diff != "" {
|
||||
t.Errorf("check for filter failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
diff = cmp.Diff(sets.NewString("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.preEvictionFilter)
|
||||
diff = cmp.Diff(sets.New("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.preEvictionFilter)
|
||||
if diff != "" {
|
||||
t.Errorf("check for preEvictionFilter failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
@@ -467,7 +467,7 @@ func TestProfileExtensionPoints(t *testing.T) {
|
||||
names = append(names, pl.Name())
|
||||
}
|
||||
sort.Strings(names)
|
||||
diff = cmp.Diff(sets.NewString("FakePlugin_0"), sets.NewString(names...))
|
||||
diff = cmp.Diff(sets.New("FakePlugin_0"), sets.New(names...))
|
||||
if diff != "" {
|
||||
t.Errorf("check for deschedule failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
@@ -478,7 +478,7 @@ func TestProfileExtensionPoints(t *testing.T) {
|
||||
names = append(names, pl.Name())
|
||||
}
|
||||
sort.Strings(names)
|
||||
diff = cmp.Diff(sets.NewString(), sets.NewString(names...))
|
||||
diff = cmp.Diff(sets.New[string](), sets.New(names...))
|
||||
if diff != "" {
|
||||
t.Errorf("check for balance failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
@@ -489,7 +489,7 @@ func TestProfileExtensionPoints(t *testing.T) {
|
||||
names = append(names, pl.Name())
|
||||
}
|
||||
sort.Strings(names)
|
||||
diff = cmp.Diff(sets.NewString("DefaultEvictor", "FilterPlugin_0", "FilterPlugin_1"), sets.NewString(names...))
|
||||
diff = cmp.Diff(sets.New("DefaultEvictor", "FilterPlugin_0", "FilterPlugin_1"), sets.New(names...))
|
||||
if diff != "" {
|
||||
t.Errorf("check for filter failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
|
||||
156
pkg/tracing/tracing.go
Normal file
@@ -0,0 +1,156 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"os"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||
"go.opentelemetry.io/otel/propagation"
|
||||
sdkresource "go.opentelemetry.io/otel/sdk/resource"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultServiceName is the default service name used for tracing.
|
||||
DefaultServiceName = "descheduler"
|
||||
|
||||
// DescheduleOperation is the operation name used for Deschedule() functions.
|
||||
DescheduleOperation = "deschedule"
|
||||
|
||||
// BalanceOperation is the operation name used for Balance() functions.
|
||||
BalanceOperation = "balance"
|
||||
|
||||
// EvictOperation is the operation name used for Evict() functions.
|
||||
EvictOperation = "evict"
|
||||
|
||||
// TracerName is used to setup the named tracer
|
||||
TracerName = "sigs.k8s.io/descheduler"
|
||||
)
|
||||
|
||||
var (
|
||||
tracer trace.Tracer
|
||||
provider trace.TracerProvider
|
||||
)
|
||||
|
||||
func init() {
|
||||
provider = trace.NewNoopTracerProvider()
|
||||
tracer = provider.Tracer(TracerName)
|
||||
}
|
||||
|
||||
func Tracer() trace.Tracer {
|
||||
return tracer
|
||||
}
|
||||
|
||||
// NewTracerProvider creates a new trace provider with the given options.
|
||||
func NewTracerProvider(ctx context.Context, endpoint, caCert, name, namespace string, sampleRate float64, fallbackToNoOpTracer bool) (err error) {
|
||||
defer func(p trace.TracerProvider) {
|
||||
if err != nil && !fallbackToNoOpTracer {
|
||||
return
|
||||
}
|
||||
if err != nil && fallbackToNoOpTracer {
|
||||
klog.ErrorS(err, "ran into an error trying to setup a trace provider. Falling back to NoOp provider")
|
||||
err = nil
|
||||
provider = trace.NewNoopTracerProvider()
|
||||
}
|
||||
otel.SetTextMapPropagator(propagation.TraceContext{})
|
||||
otel.SetTracerProvider(provider)
|
||||
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
|
||||
klog.ErrorS(err, "got error from opentelemetry")
|
||||
}))
|
||||
tracer = otel.GetTracerProvider().Tracer(TracerName)
|
||||
}(provider)
|
||||
|
||||
if endpoint == "" {
|
||||
klog.V(2).Info("Did not find a trace collector endpoint defined. Switching to NoopTraceProvider")
|
||||
provider = trace.NewNoopTracerProvider()
|
||||
return
|
||||
}
|
||||
|
||||
var opts []otlptracegrpc.Option
|
||||
opts = append(opts, otlptracegrpc.WithEndpoint(endpoint))
|
||||
var data []byte
|
||||
if caCert != "" {
|
||||
data, err = os.ReadFile(caCert)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "failed to extract ca certificate required to generate the transport credentials")
|
||||
return err
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
if !pool.AppendCertsFromPEM(data) {
|
||||
klog.Error("failed to create cert pool using the ca certificate provided for generating the transport credentials")
|
||||
return err
|
||||
}
|
||||
klog.Info("Enabling trace GRPC client in secure TLS mode")
|
||||
opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewClientTLSFromCert(pool, "")))
|
||||
} else {
|
||||
klog.Info("Enabling trace GRPC client in insecure mode")
|
||||
opts = append(opts, otlptracegrpc.WithInsecure())
|
||||
}
|
||||
|
||||
client := otlptracegrpc.NewClient(opts...)
|
||||
|
||||
exporter, err := otlptrace.New(ctx, client)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "failed to create an instance of the trace exporter")
|
||||
return err
|
||||
}
|
||||
if name == "" {
|
||||
klog.V(5).InfoS("no name provided, using default service name for tracing", "name", DefaultServiceName)
|
||||
name = DefaultServiceName
|
||||
}
|
||||
resourceOpts := []sdkresource.Option{sdkresource.WithAttributes(semconv.ServiceNameKey.String(name)), sdkresource.WithSchemaURL(semconv.SchemaURL), sdkresource.WithProcess()}
|
||||
if namespace != "" {
|
||||
resourceOpts = append(resourceOpts, sdkresource.WithAttributes(semconv.ServiceNamespaceKey.String(namespace)))
|
||||
}
|
||||
resource, err := sdkresource.New(ctx, resourceOpts...)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "failed to create traceable resource")
|
||||
return err
|
||||
}
|
||||
|
||||
spanProcessor := sdktrace.NewBatchSpanProcessor(exporter)
|
||||
provider = sdktrace.NewTracerProvider(
|
||||
sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(sampleRate))),
|
||||
sdktrace.WithSpanProcessor(spanProcessor),
|
||||
sdktrace.WithResource(resource),
|
||||
)
|
||||
klog.V(2).Info("Successfully setup trace provider")
|
||||
return
|
||||
}
|
||||
|
||||
// Shutdown shuts down the global trace exporter.
|
||||
func Shutdown(ctx context.Context) error {
|
||||
tp, ok := provider.(*sdktrace.TracerProvider)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if err := tp.Shutdown(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
klog.ErrorS(err, "failed to shutdown the trace exporter")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
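A minimal sketch of wiring this package into a binary (the endpoint, service name and namespace are placeholders, not descheduler flags; the import path is assumed to be sigs.k8s.io/descheduler/pkg/tracing):

func sketchTracing(ctx context.Context) error {
	// Fall back to the no-op provider if the collector cannot be set up.
	if err := tracing.NewTracerProvider(ctx, "otel-collector.observability:4317", "", "descheduler", "kube-system", 1.0, true); err != nil {
		return err
	}
	defer tracing.Shutdown(ctx)

	ctx, span := tracing.Tracer().Start(ctx, "descheduling-cycle")
	defer span.End()
	_ = ctx // pass the span-carrying context to downstream calls
	return nil
}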
|
||||
@@ -213,3 +213,28 @@ func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PodHasNodeAffinity returns true if the pod has a node affinity of type
|
||||
// `nodeAffinityType` defined. The nodeAffinityType param can take these two values:
|
||||
// "requiredDuringSchedulingIgnoredDuringExecution" or "requiredDuringSchedulingIgnoredDuringExecution"
|
||||
func PodHasNodeAffinity(pod *v1.Pod, nodeAffinityType NodeAffinityType) bool {
|
||||
if pod.Spec.Affinity == nil {
|
||||
return false
|
||||
}
|
||||
if pod.Spec.Affinity.NodeAffinity == nil {
|
||||
return false
|
||||
}
|
||||
if nodeAffinityType == RequiredDuringSchedulingIgnoredDuringExecution {
|
||||
return pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil
|
||||
} else if nodeAffinityType == PreferredDuringSchedulingIgnoredDuringExecution {
|
||||
return len(pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type NodeAffinityType string
|
||||
|
||||
const (
|
||||
RequiredDuringSchedulingIgnoredDuringExecution NodeAffinityType = "requiredDuringSchedulingIgnoredDuringExecution"
|
||||
PreferredDuringSchedulingIgnoredDuringExecution NodeAffinityType = "preferredDuringSchedulingIgnoredDuringExecution"
|
||||
)
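A small in-package sketch of the helper from a caller's point of view (the pod fixture is hypothetical):

func sketchPodHasNodeAffinity() {
	pod := &v1.Pod{Spec: v1.PodSpec{Affinity: &v1.Affinity{NodeAffinity: &v1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: []v1.NodeSelectorRequirement{
				{Key: "os", Operator: v1.NodeSelectorOpIn, Values: []string{"linux"}},
			}}},
		},
	}}}}
	_ = PodHasNodeAffinity(pod, RequiredDuringSchedulingIgnoredDuringExecution)  // true
	_ = PodHasNodeAffinity(pod, PreferredDuringSchedulingIgnoredDuringExecution) // false
}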
|
||||
|
||||
@@ -275,3 +275,27 @@ func TolerationsEqual(t1, t2 []v1.Toleration) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns the weight that the pod gives to a node by analyzing the
|
||||
// soft node affinity of that pod
|
||||
// (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
|
||||
func GetNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, node *v1.Node) (int32, error) {
|
||||
if !PodHasNodeAffinity(pod, PreferredDuringSchedulingIgnoredDuringExecution) {
|
||||
return 0, nil
|
||||
}
|
||||
// Iterate over each PreferredSchedulingTerm and check if it matches with the current node labels.
|
||||
// If so, add the weight of the PreferredSchedulingTerm to the sum of weight. With that, we'll know
|
||||
// the weight that the nodeAffinity from this pod gives to this node.
|
||||
var sumWeights int32 = 0
|
||||
for _, prefSchedulTerm := range pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
|
||||
preferredNodeSelector := &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{prefSchedulTerm.Preference}}
|
||||
match, err := corev1.MatchNodeSelectorTerms(node, preferredNodeSelector)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "error parsing node selector", "selector", preferredNodeSelector)
|
||||
}
|
||||
if match {
|
||||
sumWeights += prefSchedulTerm.Weight
|
||||
}
|
||||
}
|
||||
return sumWeights, nil
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestUniqueSortTolerations(t *testing.T) {
|
||||
@@ -938,3 +939,105 @@ func TestNodeSelectorTermsEqual(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func createNodeSelectorTerm(key, value string) v1.NodeSelectorTerm {
|
||||
return v1.NodeSelectorTerm{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: key,
|
||||
Operator: "In",
|
||||
Values: []string{value},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodNodeAffinityWeight(t *testing.T) {
|
||||
defaultNode := v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"key1": "value1",
|
||||
"key2": "value2",
|
||||
"key3": "value3",
|
||||
},
|
||||
},
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
affinity *v1.Affinity
|
||||
expectedWeight int32
|
||||
}{
|
||||
{
|
||||
name: "No affinity",
|
||||
affinity: nil,
|
||||
expectedWeight: 0,
|
||||
},
|
||||
{
|
||||
name: "No node affinity",
|
||||
affinity: &v1.Affinity{},
|
||||
expectedWeight: 0,
|
||||
},
|
||||
{
|
||||
name: "Empty preferred node affinity, but matching required node affinity",
|
||||
affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
createNodeSelectorTerm("key1", "value1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedWeight: 0,
|
||||
},
|
||||
{
|
||||
name: "Matching single key in preferred node affinity",
|
||||
affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
Weight: 10,
|
||||
Preference: createNodeSelectorTerm("key1", "value1"),
|
||||
},
|
||||
{
|
||||
Weight: 5,
|
||||
Preference: createNodeSelectorTerm("key1", "valueX"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedWeight: 10,
|
||||
},
|
||||
{
|
||||
name: "Matching two keys in preferred node affinity",
|
||||
affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
Weight: 10,
|
||||
Preference: createNodeSelectorTerm("key1", "value1"),
|
||||
},
|
||||
{
|
||||
Weight: 5,
|
||||
Preference: createNodeSelectorTerm("key2", "value2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedWeight: 15,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
pod := v1.Pod{}
|
||||
pod.Spec.Affinity = test.affinity
|
||||
totalWeight, err := GetNodeWeightGivenPodPreferredAffinity(&pod, &defaultNode)
|
||||
if err != nil {
|
||||
t.Error("Found non nil error")
|
||||
}
|
||||
if totalWeight != test.expectedWeight {
|
||||
t.Errorf("Expected total weight is %v but actual total weight is %v", test.expectedWeight, totalWeight)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,8 +17,8 @@ const SystemCriticalPriority = 2 * int32(1000000000)
|
||||
// GetNamespacesFromPodAffinityTerm returns a set of names
|
||||
// according to the namespaces indicated in podAffinityTerm.
|
||||
// If namespaces is empty it considers the given pod's namespace.
|
||||
func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.String {
|
||||
names := sets.String{}
|
||||
func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.Set[string] {
|
||||
names := sets.New[string]()
|
||||
if len(podAffinityTerm.Namespaces) == 0 {
|
||||
names.Insert(pod.Namespace)
|
||||
} else {
|
||||
@@ -29,7 +29,7 @@ func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffini
|
||||
|
||||
// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
|
||||
// matches the namespace and selector defined by <affinityPod>`s <term>.
|
||||
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool {
|
||||
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.Set[string], selector labels.Selector) bool {
|
||||
if !namespaces.Has(pod.Namespace) {
|
||||
return false
|
||||
}
|
||||
|
||||
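
These hunks move the affinity helpers from the deprecated sets.String to the generic sets.Set[string]. A small sketch of what a call site looks like after the migration; the utils import path and the example objects are assumed for illustration.

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/labels"

		// Assumed import path for the helpers shown above.
		"sigs.k8s.io/descheduler/pkg/utils"
	)

	func main() {
		pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default"}}
		// An empty Namespaces list means the pod's own namespace is used.
		term := &v1.PodAffinityTerm{}

		// The helper now returns sets.Set[string] instead of sets.String;
		// Has and Insert keep the same names, so call sites mostly just re-type.
		namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, term)
		fmt.Println(namespaces.Has("default")) // true

		other := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default"}}
		fmt.Println(utils.PodMatchesTermsNamespaceAndSelector(other, namespaces, labels.Everything())) // true
	}
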
@@ -6,7 +6,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
var supportedQoSComputeResources = sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory))
|
||||
var supportedQoSComputeResources = sets.New(string(v1.ResourceCPU), string(v1.ResourceMemory))
|
||||
|
||||
// QOSList is a set of (resource name, QoS class) pairs.
|
||||
type QOSList map[v1.ResourceName]v1.PodQOSClass
|
||||
@@ -44,7 +44,7 @@ func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
|
||||
}
|
||||
}
|
||||
// process limits
|
||||
qosLimitsFound := sets.NewString()
|
||||
qosLimitsFound := sets.New[string]()
|
||||
for name, quantity := range container.Resources.Limits {
|
||||
if !isSupportedQoSComputeResource(name) {
|
||||
continue
|
||||
|
||||
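
GetPodQOS keeps its classification logic and only swaps sets.NewString for the generic sets.New. A quick sketch of the expected result for a simple Burstable pod, assuming the function is exported from the descheduler utils package (the hunk does not show the package clause).

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"

		// Assumed import path; the hunk only shows part of the function body.
		"sigs.k8s.io/descheduler/pkg/utils"
	)

	func main() {
		pod := &v1.Pod{
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name: "app",
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
						Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")},
					},
				}},
			},
		}
		// Requests differ from limits and memory is unset, so the class is Burstable.
		fmt.Println(utils.GetPodQOS(pod)) // Burstable
	}
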
@@ -86,7 +86,7 @@ func TestRemoveDuplicates(t *testing.T) {
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
ImagePullPolicy: "Always",
|
||||
Image: "kubernetes/pause",
|
||||
Image: "registry.k8s.io/pause",
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: utilpointer.Bool(false),
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
var oneHourPodLifetimeSeconds uint = 3600
|
||||
@@ -131,7 +132,7 @@ func TestFailedPods(t *testing.T) {
|
||||
}
|
||||
|
||||
func initFailedJob(name, namespace string) *batchv1.Job {
|
||||
podSpec := MakePodSpec("", nil)
|
||||
podSpec := test.MakePodSpec("", nil)
|
||||
podSpec.Containers[0].Command = []string{"/bin/false"}
|
||||
podSpec.RestartPolicy = v1.RestartPolicyNever
|
||||
labelsSet := labels.Set{"test": name, "name": name}
|
||||
|
||||
@@ -176,7 +176,7 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
ImagePullPolicy: "Always",
|
||||
Image: "kubernetes/pause",
|
||||
Image: "registry.k8s.io/pause",
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: utilpointer.Bool(false),
|
||||
|
||||
@@ -55,47 +55,9 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
|
||||
return v1.PodSpec{
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
RunAsNonRoot: utilpointer.Bool(true),
|
||||
RunAsUser: utilpointer.Int64(1000),
|
||||
RunAsGroup: utilpointer.Int64(1000),
|
||||
SeccompProfile: &v1.SeccompProfile{
|
||||
Type: v1.SeccompProfileTypeRuntimeDefault,
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
ImagePullPolicy: "Never",
|
||||
Image: "kubernetes/pause",
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("200Mi"),
|
||||
},
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: utilpointer.Bool(false),
|
||||
Capabilities: &v1.Capabilities{
|
||||
Drop: []v1.Capability{
|
||||
"ALL",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
PriorityClassName: priorityClassName,
|
||||
TerminationGracePeriodSeconds: gracePeriod,
|
||||
}
|
||||
}
|
||||
|
||||
// RcByNameContainer returns a ReplicationController with specified name and container
|
||||
func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64, priorityClassName string) *v1.ReplicationController {
|
||||
// Add "name": name to the labels, overwriting if it exists.
|
||||
@@ -121,7 +83,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: MakePodSpec(priorityClassName, gracePeriod),
|
||||
Spec: test.MakePodSpec(priorityClassName, gracePeriod),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -331,7 +293,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
ImagePullPolicy: "Never",
|
||||
Image: "kubernetes/pause",
|
||||
Image: "registry.k8s.io/pause",
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
@@ -1336,7 +1298,7 @@ func createBalancedPodForNodes(
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "pause",
|
||||
Image: "kubernetes/pause",
|
||||
Image: "registry.k8s.io/pause",
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: needCreateResource,
|
||||
Requests: needCreateResource,
|
||||
|
||||
@@ -87,7 +87,7 @@ func TestTooManyRestarts(t *testing.T) {
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
ImagePullPolicy: "Always",
|
||||
Image: "kubernetes/pause",
|
||||
Image: "registry.k8s.io/pause",
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", "sleep 1s && exit 1"},
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
|
||||
@@ -2,18 +2,19 @@ package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
const zoneTopologyKey string = "topology.kubernetes.io/zone"
|
||||
@@ -35,49 +36,107 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
|
||||
|
||||
testCases := map[string]struct {
|
||||
replicaCount int
|
||||
maxSkew int
|
||||
labelKey string
|
||||
labelValue string
|
||||
constraint v1.UnsatisfiableConstraintAction
|
||||
expectedEvictedCount uint
|
||||
replicaCount int
|
||||
topologySpreadConstraint v1.TopologySpreadConstraint
|
||||
}{
|
||||
"test-rc-topology-spread-hard-constraint": {
|
||||
replicaCount: 4,
|
||||
maxSkew: 1,
|
||||
labelKey: "test",
|
||||
labelValue: "topology-spread-hard-constraint",
|
||||
constraint: v1.DoNotSchedule,
|
||||
"test-topology-spread-hard-constraint": {
|
||||
expectedEvictedCount: 1,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "topology-spread-hard-constraint",
|
||||
},
|
||||
},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
"test-rc-topology-spread-soft-constraint": {
|
||||
replicaCount: 4,
|
||||
maxSkew: 1,
|
||||
labelKey: "test",
|
||||
labelValue: "topology-spread-soft-constraint",
|
||||
constraint: v1.ScheduleAnyway,
|
||||
"test-topology-spread-soft-constraint": {
|
||||
expectedEvictedCount: 1,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "topology-spread-soft-constraint",
|
||||
},
|
||||
},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.ScheduleAnyway,
|
||||
},
|
||||
},
|
||||
"test-node-taints-policy-honor": {
|
||||
expectedEvictedCount: 1,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "node-taints-policy-honor",
|
||||
},
|
||||
},
|
||||
MaxSkew: 1,
|
||||
NodeTaintsPolicy: nodeInclusionPolicyRef(v1.NodeInclusionPolicyHonor),
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
"test-node-affinity-policy-ignore": {
|
||||
expectedEvictedCount: 1,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "node-taints-policy-honor",
|
||||
},
|
||||
},
|
||||
MaxSkew: 1,
|
||||
NodeAffinityPolicy: nodeInclusionPolicyRef(v1.NodeInclusionPolicyIgnore),
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
"test-match-label-keys": {
|
||||
expectedEvictedCount: 0,
|
||||
replicaCount: 4,
|
||||
topologySpreadConstraint: v1.TopologySpreadConstraint{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"test": "match-label-keys",
|
||||
},
|
||||
},
|
||||
MatchLabelKeys: []string{appsv1.DefaultDeploymentUniqueLabelKey},
|
||||
MaxSkew: 1,
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
}
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Logf("Creating RC %s with %d replicas", name, tc.replicaCount)
|
||||
rc := RcByNameContainer(name, testNamespace.Name, int32(tc.replicaCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
|
||||
rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
|
||||
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Error creating RC %s %v", name, err)
|
||||
t.Logf("Creating Deployment %s with %d replicas", name, tc.replicaCount)
|
||||
deployment := test.BuildTestDeployment(name, testNamespace.Name, int32(tc.replicaCount), tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
|
||||
d.Spec.Template.Spec.TopologySpreadConstraints = []v1.TopologySpreadConstraint{tc.topologySpreadConstraint}
|
||||
})
|
||||
if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Error creating Deployment %s %v", name, err)
|
||||
}
|
||||
defer deleteRC(ctx, t, clientSet, rc)
|
||||
waitForRCPodsRunning(ctx, t, clientSet, rc)
|
||||
defer test.DeleteDeployment(ctx, t, clientSet, deployment)
|
||||
test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
|
||||
|
||||
// Create a "Violator" RC that has the same label and is forced to be on the same node using a nodeSelector
|
||||
violatorRcName := name + "-violator"
|
||||
violatorCount := tc.maxSkew + 1
|
||||
violatorRc := RcByNameContainer(violatorRcName, testNamespace.Name, int32(violatorCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
|
||||
violatorRc.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
|
||||
rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
|
||||
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, violatorRc, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Error creating RC %s: %v", violatorRcName, err)
|
||||
// Create a "Violator" Deployment that has the same label and is forced to be on the same node using a nodeSelector
|
||||
violatorDeploymentName := name + "-violator"
|
||||
violatorCount := tc.topologySpreadConstraint.MaxSkew + 1
|
||||
violatorDeployment := test.BuildTestDeployment(violatorDeploymentName, testNamespace.Name, violatorCount, tc.topologySpreadConstraint.LabelSelector.DeepCopy().MatchLabels, func(d *appsv1.Deployment) {
|
||||
d.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
|
||||
})
|
||||
if _, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, violatorDeployment, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Error creating Deployment %s: %v", violatorDeploymentName, err)
|
||||
}
|
||||
defer deleteRC(ctx, t, clientSet, violatorRc)
|
||||
waitForRCPodsRunning(ctx, t, clientSet, violatorRc)
|
||||
defer test.DeleteDeployment(ctx, t, clientSet, violatorDeployment)
|
||||
test.WaitForDeploymentPodsRunning(ctx, t, clientSet, violatorDeployment)
|
||||
|
||||
podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
|
||||
|
||||
@@ -103,7 +162,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}
|
||||
|
||||
plugin, err := removepodsviolatingtopologyspreadconstraint.New(&removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
IncludeSoftConstraints: tc.constraint != v1.DoNotSchedule,
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{tc.topologySpreadConstraint.WhenUnsatisfiable},
|
||||
},
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: clientSet,
|
||||
@@ -120,18 +179,23 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
|
||||
|
||||
t.Logf("Wait for terminating pods of %s to disappear", name)
|
||||
waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
|
||||
waitForTerminatingPodsToDisappear(ctx, t, clientSet, deployment.Namespace)
|
||||
|
||||
if totalEvicted := podEvictor.TotalEvicted(); totalEvicted > 0 {
|
||||
if totalEvicted := podEvictor.TotalEvicted(); totalEvicted == tc.expectedEvictedCount {
|
||||
t.Logf("Total of %d Pods were evicted for %s", totalEvicted, name)
|
||||
} else {
|
||||
t.Fatalf("Pods were not evicted for %s TopologySpreadConstraint", name)
|
||||
t.Fatalf("Expected %d evictions but got %d for %s TopologySpreadConstraint", tc.expectedEvictedCount, totalEvicted, name)
|
||||
}
|
||||
|
||||
if tc.expectedEvictedCount == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
|
||||
waitForRCPodsRunning(ctx, t, clientSet, rc)
|
||||
test.WaitForDeploymentPodsRunning(ctx, t, clientSet, deployment)
|
||||
|
||||
pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", tc.labelKey, tc.labelValue)})
|
||||
listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(tc.topologySpreadConstraint.LabelSelector.MatchLabels).String()}
|
||||
pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, listOptions)
|
||||
if err != nil {
|
||||
t.Errorf("Error listing pods for %s: %v", name, err)
|
||||
}
|
||||
@@ -146,26 +210,15 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}
|
||||
|
||||
min, max := getMinAndMaxPodDistribution(nodePodCountMap)
|
||||
if max-min > tc.maxSkew {
|
||||
t.Errorf("Pod distribution for %s is still violating the max skew of %d as it is %d", name, tc.maxSkew, max-min)
|
||||
if max-min > int(tc.topologySpreadConstraint.MaxSkew) {
|
||||
t.Errorf("Pod distribution for %s is still violating the max skew of %d as it is %d", name, tc.topologySpreadConstraint.MaxSkew, max-min)
|
||||
}
|
||||
|
||||
t.Logf("Pods for %s were distributed in line with max skew of %d", name, tc.maxSkew)
|
||||
t.Logf("Pods for %s were distributed in line with max skew of %d", name, tc.topologySpreadConstraint.MaxSkew)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func makeTopologySpreadConstraints(maxSkew int, labelKey, labelValue string, constraint v1.UnsatisfiableConstraintAction) []v1.TopologySpreadConstraint {
|
||||
return []v1.TopologySpreadConstraint{
|
||||
{
|
||||
MaxSkew: int32(maxSkew),
|
||||
TopologyKey: zoneTopologyKey,
|
||||
WhenUnsatisfiable: constraint,
|
||||
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{labelKey: labelValue}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getMinAndMaxPodDistribution(nodePodCountMap map[string]int) (int, int) {
|
||||
min := math.MaxInt32
|
||||
max := math.MinInt32
|
||||
@@ -180,3 +233,7 @@ func getMinAndMaxPodDistribution(nodePodCountMap map[string]int) (int, int) {
|
||||
|
||||
return min, max
|
||||
}
|
||||
|
||||
func nodeInclusionPolicyRef(policy v1.NodeInclusionPolicy) *v1.NodeInclusionPolicy {
|
||||
return &policy
|
||||
}
|
||||
|
||||
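
In the e2e test above, the plugin arguments change from the boolean IncludeSoftConstraints to an explicit Constraints list of unsatisfiable actions. A minimal sketch of the new argument shape, assuming no other fields are needed for a bare configuration.

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"

		"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
	)

	func main() {
		// Equivalent of the old IncludeSoftConstraints=true: list both
		// unsatisfiable actions so hard and soft constraints are handled.
		args := &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
			Constraints: []v1.UnsatisfiableConstraintAction{
				v1.DoNotSchedule,
				v1.ScheduleAnyway,
			},
		}
		fmt.Printf("%+v\n", args)
	}
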
@@ -25,16 +25,16 @@ SKIP_INSTALL=${SKIP_INSTALL:-}
|
||||
if [ -n "$KIND_E2E" ]; then
|
||||
# If we did not set SKIP_INSTALL
|
||||
if [ -z "$SKIP_INSTALL" ]; then
|
||||
K8S_VERSION=${KUBERNETES_VERSION:-v1.27.0}
|
||||
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
|
||||
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.18.0/kind-linux-amd64
|
||||
K8S_VERSION=${KUBERNETES_VERSION:-v1.28.0}
|
||||
curl -Lo kubectl https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
|
||||
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.20.0/kind-linux-amd64
|
||||
chmod +x kind-linux-amd64
|
||||
mv kind-linux-amd64 kind
|
||||
export PATH=$PATH:$PWD
|
||||
kind create cluster --image kindest/node:${K8S_VERSION} --config=./hack/kind_config.yaml
|
||||
fi
|
||||
${CONTAINER_ENGINE:-docker} pull kubernetes/pause
|
||||
kind load docker-image kubernetes/pause
|
||||
${CONTAINER_ENGINE:-docker} pull registry.k8s.io/pause
|
||||
kind load docker-image registry.k8s.io/pause
|
||||
kind get kubeconfig > /tmp/admin.conf
|
||||
export KUBECONFIG="/tmp/admin.conf"
|
||||
mkdir -p ~/gopath/src/sigs.k8s.io/
|
||||
|
||||
@@ -17,13 +17,58 @@ limitations under the License.
|
||||
package test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func BuildTestDeployment(name, namespace string, replicas int32, labels map[string]string, apply func(deployment *appsv1.Deployment)) *appsv1.Deployment {
|
||||
// Add "name": name to the labels, overwriting if it exists.
|
||||
labels["name"] = name
|
||||
|
||||
deployment := &appsv1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Deployment",
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: utilpointer.Int32(replicas),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: MakePodSpec("", utilpointer.Int64(0)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if apply != nil {
|
||||
apply(deployment)
|
||||
}
|
||||
|
||||
return deployment
|
||||
}
|
||||
|
||||
// BuildTestPod creates a test pod with given parameters.
|
||||
func BuildTestPod(name string, cpu, memory int64, nodeName string, apply func(*v1.Pod)) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
@@ -124,6 +169,45 @@ func BuildTestNode(name string, millicpu, mem, pods int64, apply func(*v1.Node))
|
||||
return node
|
||||
}
|
||||
|
||||
func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
|
||||
return v1.PodSpec{
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
RunAsNonRoot: utilpointer.Bool(true),
|
||||
RunAsUser: utilpointer.Int64(1000),
|
||||
RunAsGroup: utilpointer.Int64(1000),
|
||||
SeccompProfile: &v1.SeccompProfile{
|
||||
Type: v1.SeccompProfileTypeRuntimeDefault,
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
ImagePullPolicy: "Never",
|
||||
Image: "registry.k8s.io/pause",
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("200Mi"),
|
||||
},
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100m"),
|
||||
v1.ResourceMemory: resource.MustParse("100Mi"),
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: utilpointer.Bool(false),
|
||||
Capabilities: &v1.Capabilities{
|
||||
Drop: []v1.Capability{
|
||||
"ALL",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
PriorityClassName: priorityClassName,
|
||||
TerminationGracePeriodSeconds: gracePeriod,
|
||||
}
|
||||
}
|
||||
|
||||
// MakeBestEffortPod makes the given pod a BestEffort pod
|
||||
func MakeBestEffortPod(pod *v1.Pod) {
|
||||
pod.Spec.Containers[0].Resources.Requests = nil
|
||||
@@ -179,8 +263,77 @@ func SetPodExtendedResourceRequest(pod *v1.Pod, resourceName v1.ResourceName, re
|
||||
pod.Spec.Containers[0].Resources.Requests[resourceName] = *resource.NewQuantity(requestQuantity, resource.DecimalSI)
|
||||
}
|
||||
|
||||
// SetNodeExtendedResouces sets the given node's extended resources
|
||||
// SetNodeExtendedResource sets the given node's extended resources
|
||||
func SetNodeExtendedResource(node *v1.Node, resourceName v1.ResourceName, requestQuantity int64) {
|
||||
node.Status.Capacity[resourceName] = *resource.NewQuantity(requestQuantity, resource.DecimalSI)
|
||||
node.Status.Allocatable[resourceName] = *resource.NewQuantity(requestQuantity, resource.DecimalSI)
|
||||
}
|
||||
|
||||
func DeleteDeployment(ctx context.Context, t *testing.T, clientSet clientset.Interface, deployment *appsv1.Deployment) {
|
||||
// set number of replicas to 0
|
||||
deploymentCopy := deployment.DeepCopy()
|
||||
deploymentCopy.Spec.Replicas = utilpointer.Int32(0)
|
||||
if _, err := clientSet.AppsV1().Deployments(deploymentCopy.Namespace).Update(ctx, deploymentCopy, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("Error updating replica controller %v", err)
|
||||
}
|
||||
|
||||
// wait 30 seconds until all pods are deleted
|
||||
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(c context.Context) (bool, error) {
|
||||
scale, err := clientSet.AppsV1().Deployments(deployment.Namespace).GetScale(c, deployment.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return scale.Spec.Replicas == 0, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Error deleting Deployment pods %v", err)
|
||||
}
|
||||
|
||||
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(c context.Context) (bool, error) {
|
||||
podList, _ := clientSet.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Template.Labels).String()})
|
||||
t.Logf("Waiting for %v Deployment pods to disappear, still %v remaining", deployment.Name, len(podList.Items))
|
||||
if len(podList.Items) > 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Error waiting for Deployment pods to disappear: %v", err)
|
||||
}
|
||||
|
||||
if err := clientSet.AppsV1().Deployments(deployment.Namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("Error deleting Deployment %v", err)
|
||||
}
|
||||
|
||||
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(c context.Context) (bool, error) {
|
||||
_, err := clientSet.AppsV1().Deployments(deployment.Namespace).Get(ctx, deployment.Name, metav1.GetOptions{})
|
||||
if err != nil && strings.Contains(err.Error(), "not found") {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Error deleting Deployment %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func WaitForDeploymentPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, deployment *appsv1.Deployment) {
|
||||
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(c context.Context) (bool, error) {
|
||||
podList, err := clientSet.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(deployment.Spec.Template.ObjectMeta.Labels).String(),
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(podList.Items) != int(*deployment.Spec.Replicas) {
|
||||
t.Logf("Waiting for %v pods to be created, got %v instead", *deployment.Spec.Replicas, len(podList.Items))
|
||||
return false, nil
|
||||
}
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Status.Phase != v1.PodRunning {
|
||||
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Error waiting for pods running: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
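
The shared test package now owns the Deployment fixtures (BuildTestDeployment, DeleteDeployment, WaitForDeploymentPodsRunning). A hedged sketch of how a test might wire them together; runWithTestDeployment and its namespace handling are hypothetical, only the three helpers come from this diff.

	package e2e

	import (
		"context"
		"testing"

		appsv1 "k8s.io/api/apps/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		clientset "k8s.io/client-go/kubernetes"

		"sigs.k8s.io/descheduler/test"
	)

	// runWithTestDeployment is a hypothetical wrapper showing the intended
	// create / wait / clean-up flow for the new Deployment fixtures.
	func runWithTestDeployment(ctx context.Context, t *testing.T, client clientset.Interface, namespace string, body func(*appsv1.Deployment)) {
		deployment := test.BuildTestDeployment("example", namespace, 2,
			map[string]string{"test": "example"},
			func(d *appsv1.Deployment) {
				// Per-test tweaks go here, e.g. node selectors or topology spread constraints.
				d.Spec.Template.Spec.NodeSelector = map[string]string{"kubernetes.io/os": "linux"}
			})

		if _, err := client.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
			t.Fatalf("Error creating Deployment: %v", err)
		}
		defer test.DeleteDeployment(ctx, t, client, deployment)
		test.WaitForDeploymentPodsRunning(ctx, t, client, deployment)

		body(deployment)
	}
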
68 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go (generated, vendored, new file)
@@ -0,0 +1,68 @@
|
||||
/*
|
||||
Package antlr implements the Go version of the ANTLR 4 runtime.
|
||||
|
||||
# The ANTLR Tool
|
||||
|
||||
ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
|
||||
or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
|
||||
From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
|
||||
(or visitor) that makes it easy to respond to the recognition of phrases of interest.
|
||||
|
||||
# Code Generation
|
||||
|
||||
ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
|
||||
runtime library, written specifically to support the generated code in the target language. This library is the
|
||||
runtime for the Go target.
|
||||
|
||||
To generate code for the go target, it is generally recommended to place the source grammar files in a package of
|
||||
their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
|
||||
it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
|
||||
that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
|
||||
way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
|
||||
your IDE, or configuration in your CI system.
|
||||
|
||||
Here is a general template for an ANTLR based recognizer in Go:
|
||||
|
||||
.
|
||||
├── myproject
|
||||
├── parser
|
||||
│ ├── mygrammar.g4
|
||||
│ ├── antlr-4.12.0-complete.jar
|
||||
│ ├── error_listeners.go
|
||||
│ ├── generate.go
|
||||
│ ├── generate.sh
|
||||
├── go.mod
|
||||
├── go.sum
|
||||
├── main.go
|
||||
└── main_test.go
|
||||
|
||||
Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
|
||||
The generate.go file then looks like this:
|
||||
|
||||
package parser
|
||||
|
||||
//go:generate ./generate.sh
|
||||
|
||||
And the generate.sh file will look similar to this:
|
||||
|
||||
#!/bin/sh
|
||||
|
||||
alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
|
||||
antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
|
||||
|
||||
depending on whether you want visitors or listeners or any other ANTLR options.
|
||||
|
||||
From the command line at the root of your package “myproject” you can then simply issue the command:
|
||||
|
||||
go generate ./...
|
||||
|
||||
# Copyright Notice
|
||||
|
||||
Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
|
||||
Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
|
||||
|
||||
[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
|
||||
[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
|
||||
*/
|
||||
package antlr
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -6,11 +6,24 @@ package antlr
|
||||
|
||||
import "sync"
|
||||
|
||||
// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
|
||||
// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
|
||||
var ATNInvalidAltNumber int
|
||||
|
||||
// ATN represents an “[Augmented Transition Network]”, though general in ANTLR the term
|
||||
// “Augmented Recursive Transition Network” though there are some descriptions of “[Recursive Transition Network]”
|
||||
// in existence.
|
||||
//
|
||||
// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
|
||||
//
|
||||
// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
|
||||
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
|
||||
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
|
||||
type ATN struct {
|
||||
// DecisionToState is the decision points for all rules, subrules, optional
|
||||
// blocks, ()+, ()*, etc. Used to build DFA predictors for them.
|
||||
// blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
|
||||
// can go back later and build DFA predictors for them. This includes
|
||||
// all the rules, subrules, optional blocks, ()+, ()* etc...
|
||||
DecisionToState []DecisionState
|
||||
|
||||
// grammarType is the ATN type and is used for deserializing ATNs from strings.
|
||||
@@ -45,6 +58,8 @@ type ATN struct {
|
||||
edgeMu sync.RWMutex
|
||||
}
|
||||
|
||||
// NewATN returns a new ATN struct representing the given grammarType and is used
|
||||
// for runtime deserialization of ATNs from the code generated by the ANTLR tool
|
||||
func NewATN(grammarType int, maxTokenType int) *ATN {
|
||||
return &ATN{
|
||||
grammarType: grammarType,
|
||||
@@ -53,7 +68,7 @@ func NewATN(grammarType int, maxTokenType int) *ATN {
|
||||
}
|
||||
}
|
||||
|
||||
// NextTokensInContext computes the set of valid tokens that can occur starting
|
||||
// NextTokensInContext computes and returns the set of valid tokens that can occur starting
|
||||
// in state s. If ctx is nil, the set of tokens will not include what can follow
|
||||
// the rule surrounding s. In other words, the set will be restricted to tokens
|
||||
// reachable staying within the rule of s.
|
||||
@@ -61,8 +76,8 @@ func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
|
||||
return NewLL1Analyzer(a).Look(s, nil, ctx)
|
||||
}
|
||||
|
||||
// NextTokensNoContext computes the set of valid tokens that can occur starting
|
||||
// in s and staying in same rule. Token.EPSILON is in set if we reach end of
|
||||
// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
|
||||
// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of
|
||||
// rule.
|
||||
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
|
||||
a.mu.Lock()
|
||||
@@ -76,6 +91,8 @@ func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
|
||||
return iset
|
||||
}
|
||||
|
||||
// NextTokens computes and returns the set of valid tokens starting in state s, by
|
||||
// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil).
|
||||
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
|
||||
if ctx == nil {
|
||||
return a.NextTokensNoContext(s)
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -8,19 +8,14 @@ import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type comparable interface {
|
||||
equals(other interface{}) bool
|
||||
}
|
||||
|
||||
// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
|
||||
// context). The syntactic context is a graph-structured stack node whose
|
||||
// path(s) to the root is the rule invocation(s) chain used to arrive at the
|
||||
// state. The semantic context is the tree of semantic predicates encountered
|
||||
// before reaching an ATN state.
|
||||
type ATNConfig interface {
|
||||
comparable
|
||||
|
||||
hash() int
|
||||
Equals(o Collectable[ATNConfig]) bool
|
||||
Hash() int
|
||||
|
||||
GetState() ATNState
|
||||
GetAlt() int
|
||||
@@ -47,7 +42,7 @@ type BaseATNConfig struct {
|
||||
reachesIntoOuterContext int
|
||||
}
|
||||
|
||||
func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
|
||||
func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
|
||||
return &BaseATNConfig{
|
||||
state: old.state,
|
||||
alt: old.alt,
|
||||
@@ -135,11 +130,16 @@ func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
|
||||
b.reachesIntoOuterContext = v
|
||||
}
|
||||
|
||||
// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
|
||||
// for a collection.
|
||||
//
|
||||
// An ATN configuration is equal to another if both have the same state, they
|
||||
// predict the same alternative, and syntactic/semantic contexts are the same.
|
||||
func (b *BaseATNConfig) equals(o interface{}) bool {
|
||||
func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
|
||||
if b == o {
|
||||
return true
|
||||
} else if o == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var other, ok = o.(*BaseATNConfig)
|
||||
@@ -153,30 +153,32 @@ func (b *BaseATNConfig) equals(o interface{}) bool {
|
||||
if b.context == nil {
|
||||
equal = other.context == nil
|
||||
} else {
|
||||
equal = b.context.equals(other.context)
|
||||
equal = b.context.Equals(other.context)
|
||||
}
|
||||
|
||||
var (
|
||||
nums = b.state.GetStateNumber() == other.state.GetStateNumber()
|
||||
alts = b.alt == other.alt
|
||||
cons = b.semanticContext.equals(other.semanticContext)
|
||||
cons = b.semanticContext.Equals(other.semanticContext)
|
||||
sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
|
||||
)
|
||||
|
||||
return nums && alts && cons && sups && equal
|
||||
}
|
||||
|
||||
func (b *BaseATNConfig) hash() int {
|
||||
// Hash is the default hash function for BaseATNConfig, when no specialist hash function
|
||||
// is required for a collection
|
||||
func (b *BaseATNConfig) Hash() int {
|
||||
var c int
|
||||
if b.context != nil {
|
||||
c = b.context.hash()
|
||||
c = b.context.Hash()
|
||||
}
|
||||
|
||||
h := murmurInit(7)
|
||||
h = murmurUpdate(h, b.state.GetStateNumber())
|
||||
h = murmurUpdate(h, b.alt)
|
||||
h = murmurUpdate(h, c)
|
||||
h = murmurUpdate(h, b.semanticContext.hash())
|
||||
h = murmurUpdate(h, b.semanticContext.Hash())
|
||||
return murmurFinish(h, 4)
|
||||
}
|
||||
|
||||
@@ -243,7 +245,9 @@ func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *Lex
|
||||
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
|
||||
}
|
||||
|
||||
func (l *LexerATNConfig) hash() int {
|
||||
// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
|
||||
// the default comparator [ObjEqComparator].
|
||||
func (l *LexerATNConfig) Hash() int {
|
||||
var f int
|
||||
if l.passedThroughNonGreedyDecision {
|
||||
f = 1
|
||||
@@ -253,15 +257,20 @@ func (l *LexerATNConfig) hash() int {
|
||||
h := murmurInit(7)
|
||||
h = murmurUpdate(h, l.state.GetStateNumber())
|
||||
h = murmurUpdate(h, l.alt)
|
||||
h = murmurUpdate(h, l.context.hash())
|
||||
h = murmurUpdate(h, l.semanticContext.hash())
|
||||
h = murmurUpdate(h, l.context.Hash())
|
||||
h = murmurUpdate(h, l.semanticContext.Hash())
|
||||
h = murmurUpdate(h, f)
|
||||
h = murmurUpdate(h, l.lexerActionExecutor.hash())
|
||||
h = murmurUpdate(h, l.lexerActionExecutor.Hash())
|
||||
h = murmurFinish(h, 6)
|
||||
return h
|
||||
}
|
||||
|
||||
func (l *LexerATNConfig) equals(other interface{}) bool {
|
||||
// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
|
||||
// the default comparator [ObjEqComparator].
|
||||
func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
|
||||
if l == other {
|
||||
return true
|
||||
}
|
||||
var othert, ok = other.(*LexerATNConfig)
|
||||
|
||||
if l == other {
|
||||
@@ -275,7 +284,7 @@ func (l *LexerATNConfig) equals(other interface{}) bool {
|
||||
var b bool
|
||||
|
||||
if l.lexerActionExecutor != nil {
|
||||
b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
|
||||
b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
|
||||
} else {
|
||||
b = othert.lexerActionExecutor != nil
|
||||
}
|
||||
@@ -284,10 +293,9 @@ func (l *LexerATNConfig) equals(other interface{}) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
return l.BaseATNConfig.equals(othert.BaseATNConfig)
|
||||
return l.BaseATNConfig.Equals(othert.BaseATNConfig)
|
||||
}
|
||||
|
||||
|
||||
func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
|
||||
var ds, ok = target.(DecisionState)
|
||||
|
||||
@@ -1,24 +1,25 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type ATNConfigSet interface {
|
||||
hash() int
|
||||
Hash() int
|
||||
Equals(o Collectable[ATNConfig]) bool
|
||||
Add(ATNConfig, *DoubleDict) bool
|
||||
AddAll([]ATNConfig) bool
|
||||
|
||||
GetStates() Set
|
||||
GetStates() *JStore[ATNState, Comparator[ATNState]]
|
||||
GetPredicates() []SemanticContext
|
||||
GetItems() []ATNConfig
|
||||
|
||||
OptimizeConfigs(interpreter *BaseATNSimulator)
|
||||
|
||||
Equals(other interface{}) bool
|
||||
|
||||
Length() int
|
||||
IsEmpty() bool
|
||||
Contains(ATNConfig) bool
|
||||
@@ -57,7 +58,7 @@ type BaseATNConfigSet struct {
|
||||
// effectively doubles the number of objects associated with ATNConfigs. All
|
||||
// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
|
||||
// read-only because a set becomes a DFA state.
|
||||
configLookup Set
|
||||
configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
|
||||
|
||||
// configs is the added elements.
|
||||
configs []ATNConfig
|
||||
@@ -83,7 +84,7 @@ type BaseATNConfigSet struct {
|
||||
|
||||
// readOnly is whether it is read-only. Do not
|
||||
// allow any code to manipulate the set if true because DFA states will point at
|
||||
// sets and those must not change. It not protect other fields; conflictingAlts
|
||||
// sets and those must not change. It not, protect other fields; conflictingAlts
|
||||
// in particular, which is assigned after readOnly.
|
||||
readOnly bool
|
||||
|
||||
@@ -104,7 +105,7 @@ func (b *BaseATNConfigSet) Alts() *BitSet {
|
||||
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
|
||||
return &BaseATNConfigSet{
|
||||
cachedHash: -1,
|
||||
configLookup: newArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
|
||||
configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
|
||||
fullCtx: fullCtx,
|
||||
}
|
||||
}
|
||||
@@ -126,9 +127,11 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
|
||||
b.dipsIntoOuterContext = true
|
||||
}
|
||||
|
||||
existing := b.configLookup.Add(config).(ATNConfig)
|
||||
existing, present := b.configLookup.Put(config)
|
||||
|
||||
if existing == config {
|
||||
// The config was not already in the set
|
||||
//
|
||||
if !present {
|
||||
b.cachedHash = -1
|
||||
b.configs = append(b.configs, config) // Track order here
|
||||
return true
|
||||
@@ -154,11 +157,14 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) GetStates() Set {
|
||||
states := newArray2DHashSet(nil, nil)
|
||||
func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
|
||||
|
||||
// states uses the standard comparator provided by the ATNState instance
|
||||
//
|
||||
states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
|
||||
|
||||
for i := 0; i < len(b.configs); i++ {
|
||||
states.Add(b.configs[i].GetState())
|
||||
states.Put(b.configs[i].GetState())
|
||||
}
|
||||
|
||||
return states
|
||||
@@ -214,7 +220,34 @@ func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Equals(other interface{}) bool {
|
||||
// Compare is a hack function just to verify that adding DFAstares to the known
|
||||
// set works, so long as comparison of ATNConfigSet s works. For that to work, we
|
||||
// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
|
||||
// know the order, so we do this inefficient hack. If this proves the point, then
|
||||
// we can change the config set to a better structure.
|
||||
func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
|
||||
if len(b.configs) != len(bs.configs) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, c := range b.configs {
|
||||
found := false
|
||||
for _, c2 := range bs.configs {
|
||||
if c.Equals(c2) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
|
||||
if b == other {
|
||||
return true
|
||||
} else if _, ok := other.(*BaseATNConfigSet); !ok {
|
||||
@@ -224,15 +257,15 @@ func (b *BaseATNConfigSet) Equals(other interface{}) bool {
|
||||
other2 := other.(*BaseATNConfigSet)
|
||||
|
||||
return b.configs != nil &&
|
||||
// TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
|
||||
b.fullCtx == other2.fullCtx &&
|
||||
b.uniqueAlt == other2.uniqueAlt &&
|
||||
b.conflictingAlts == other2.conflictingAlts &&
|
||||
b.hasSemanticContext == other2.hasSemanticContext &&
|
||||
b.dipsIntoOuterContext == other2.dipsIntoOuterContext
|
||||
b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
|
||||
b.Compare(other2)
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) hash() int {
|
||||
func (b *BaseATNConfigSet) Hash() int {
|
||||
if b.readOnly {
|
||||
if b.cachedHash == -1 {
|
||||
b.cachedHash = b.hashCodeConfigs()
|
||||
@@ -247,7 +280,7 @@ func (b *BaseATNConfigSet) hash() int {
|
||||
func (b *BaseATNConfigSet) hashCodeConfigs() int {
|
||||
h := 1
|
||||
for _, config := range b.configs {
|
||||
h = 31*h + config.hash()
|
||||
h = 31*h + config.Hash()
|
||||
}
|
||||
return h
|
||||
}
|
||||
@@ -283,7 +316,7 @@ func (b *BaseATNConfigSet) Clear() {
|
||||
|
||||
b.configs = make([]ATNConfig, 0)
|
||||
b.cachedHash = -1
|
||||
b.configLookup = newArray2DHashSet(nil, equalATNConfigs)
|
||||
b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
|
||||
}
|
||||
|
||||
func (b *BaseATNConfigSet) FullContext() bool {
|
||||
@@ -365,7 +398,8 @@ type OrderedATNConfigSet struct {
|
||||
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
|
||||
b := NewBaseATNConfigSet(false)
|
||||
|
||||
b.configLookup = newArray2DHashSet(nil, nil)
|
||||
// This set uses the standard Hash() and Equals() from ATNConfig
|
||||
b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
|
||||
|
||||
return &OrderedATNConfigSet{BaseATNConfigSet: b}
|
||||
}
|
||||
@@ -375,7 +409,7 @@ func hashATNConfig(i interface{}) int {
|
||||
hash := 7
|
||||
hash = 31*hash + o.GetState().GetStateNumber()
|
||||
hash = 31*hash + o.GetAlt()
|
||||
hash = 31*hash + o.GetSemanticContext().hash()
|
||||
hash = 31*hash + o.GetSemanticContext().Hash()
|
||||
return hash
|
||||
}
|
||||
|
||||
@@ -403,5 +437,5 @@ func equalATNConfigs(a, b interface{}) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
return ai.GetSemanticContext().equals(bi.GetSemanticContext())
|
||||
return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.