mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Compare commits


60 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
bae120929e Merge pull request #1224 from a7i/docs-1.28
k8s 1.28: update docs and go-version
2023-08-23 08:53:29 -07:00
Amir Alavi
30dd78dcee k8s 1.28: update docs and go-version
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2023-08-22 20:46:27 -04:00
Kubernetes Prow Robot
1d4dc57ad1 Merge pull request #1221 from antoinedeschenes/fix-profile-span-attribute
profile: fix span attribute typo
2023-08-22 15:39:55 -07:00
Antoine Deschênes
3f0c06b58d profile: fix span attribute typo
Fix profile attribute "prpfile" typo in trace spans.

Signed-off-by: Antoine Deschênes <antoine.deschenes@linux.com>
2023-08-22 13:10:20 -04:00
Kubernetes Prow Robot
bb4721049f Merge pull request #1216 from JaneLiuL/master
Bump Kubernetes dependencies to v1.28.0
2023-08-21 07:21:24 -07:00
Amir Alavi
78c9ae851c upgrade kind to v0.20.0 2023-08-21 10:04:31 -04:00
Kubernetes Prow Robot
9660a7b469 Merge pull request #1202 from a7i/gitattributes
.gitattribute to not pollute PRs or stats
2023-08-21 05:09:23 -07:00
JaneLiuL
81aa897c48 Bump Kubernetes dependencies to v1.28.0 2023-08-17 15:59:15 +08:00
Kubernetes Prow Robot
f4c64c2c75 Merge pull request #1215 from Abirdcfly/master
fix: descheduler_loop_duration_seconds has wrong value
2023-08-10 01:05:27 -07:00
Abirdcfly
ca5781827a fix: descheduler_loop_duration_seconds has wrong value
Signed-off-by: Abirdcfly <fp544037857@gmail.com>
2023-08-09 21:36:53 +08:00
Jordi Piqué Sellés
31704047c5 feat: Implement preferredDuringSchedulingIgnoredDuringExecution for RemovePodsViolatingNodeAffinity (#1210)
* feat: Implement preferredDuringSchedulingIgnoredDuringExecution for RemovePodsViolatingNodeAffinity

Now, the descheduler can detect and evict pods that are not optimally
allocated according to the "preferred..." node affinity. It only evicts
a pod if it can be scheduled on a node that scores higher in terms of
preferred node affinity than the current one.

This can be activated by enabling the RemovePodsViolatingNodeAffinity
plugin and passing "preferredDuringSchedulingIgnoredDuringExecution" in
the args.

For example, imagine we have a pod that prefers nodes with label "key1:
value1" with a weight of 10. If this pod is scheduled on a node that
doesn't have "key1: value1" as label but there's another node that has
this label and where this pod can potentially run, then the descheduler
will evict the pod.

Another effect of this commit is that the
RemovePodsViolatingNodeAffinity plugin will not remove pods that don't
fit in the current node but for other reasons than violating the node
affinity. Before that, enabling this plugin could cause evictions on
pods that were running on tainted nodes without the necessary
tolerations.

This commit also fixes the wording of some tests from
node_affinity_test.go and some parameters and expectations of these
tests, which were wrong.

* Optimization on RemovePodsViolatingNodeAffinity

Before checking if a pod can be evicted or if it can be scheduled
somewhere else, we first check if it has the corresponding nodeAffinity
field defined. Otherwise, the pod is automatically discarded as a
candidate.

Apart from that, the method that calculates the weight that a pod
gives to a node based on its preferred node affinity has been
renamed to better reflect what it does.
2023-08-04 03:08:21 -07:00
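
The scenario described in this commit message corresponds roughly to the following pod spec, shown as an illustrative sketch (the `key1: value1` label and weight of 10 come from the example above; the pod name and image are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod              # placeholder name
spec:
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 10
        preference:
          matchExpressions:
          - key: key1
            operator: In
            values:
            - value1
  containers:
  - name: app
    image: registry.k8s.io/pause # placeholder image
```

If such a pod lands on a node without `key1: value1` while another node carrying that label could host it, the plugin (with the preferred type enabled in its args) considers the pod for eviction.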
Kubernetes Prow Robot
1be0ab2bd1 Merge pull request #1207 from a7i/amir/fix-cm-RemoveDuplicates
fix: base configmap missing plugin RemoveDuplicates
2023-07-26 07:06:19 -07:00
Amir Alavi
f8442fbb0d fix: base configmap missing plugin RemoveDuplicates 2023-07-25 22:00:49 -04:00
Kubernetes Prow Robot
f7b7f50b92 Merge pull request #1203 from lucming/aggregate-errors
nodefit: aggregate errors
2023-07-24 08:52:13 -07:00
lucming
27a436b98a aggregate errors 2023-07-24 23:39:26 +08:00
Kubernetes Prow Robot
8f7dea10c1 Merge pull request #1201 from a7i/1.28-beta.0
bump to k8s 1.28 beta.0
2023-07-24 06:44:15 -07:00
Amir Alavi
68f43fe591 .gitattribute to not pollute PRs or stats 2023-07-22 11:01:59 -04:00
Amir Alavi
a0cbfcfbca regenerate docs for cli 2023-07-21 22:16:36 -04:00
Amir Alavi
2a91eda30d bump to k8s 1.28 beta.0 2023-07-21 22:06:55 -04:00
Kubernetes Prow Robot
06dc26a83e Merge pull request #1189 from harshanarayana/feature/enable-otel
feat: Enable open telemetry tracing
2023-07-18 04:43:10 -07:00
Harsha Narayana
add9d6e897 enable Open telemetry support for descheduler
1. Enable OTEL configuration and base framework
2. update generated conversion spec
3. enable docker based conversion and deep copy generate
4. fix broken unit tests
2023-07-18 11:14:21 +05:30
Kubernetes Prow Robot
d9b763a28b Merge pull request #1186 from knelasevero/fix_priority_name
fix priority threshold by name alone
2023-07-10 04:17:08 -07:00
Lucas Severo Alves
3ff38bab59 fix priority threshold by name alone 2023-07-07 16:12:28 +02:00
Kubernetes Prow Robot
ed1554dd19 Merge pull request #1141 from a7i/processing-log-level
bump log level for processing info
2023-07-05 10:46:58 -07:00
Jan Chaloupka
931aac9c71 deschedule/balance order (continuation) (#1177)
* generalise RunDeschedulerLoop and RunProfiles and stabilish deschedule/balance order

* assign nodes outside RunDeschedulerLoop and use instanced profiles

* stop exporting internal profile bits

* refactoring RunProfiles and add methods to Deschduler

* types outside function

* shutdown eventBroadcaster outside NewDescheduler

* all new methods inside descheduler.go

* avoid exporting all Descheduler fields

* Address review comments

---------

Co-authored-by: Lucas Severo Alves <lucassalves65@gmail.com>
2023-06-21 06:37:40 -07:00
Kubernetes Prow Robot
a497541f39 Merge pull request #1174 from ingvagabund/bump-fake-watch-channel-size
FakeClientset: bump watch channel size
2023-06-20 10:58:21 -07:00
Amir Alavi
333b5cfbb6 bump log level for processing info 2023-06-16 08:39:05 -04:00
Kubernetes Prow Robot
eb2372137e Merge pull request #1148 from a7i/tsc-constraints
removepodsviolatingtopologyspreadconstraint: implement explicit constraints
2023-06-16 05:36:23 -07:00
Amir Alavi
7f2f6f2b16 removepodsviolatingtopologyspreadconstraint: implement explicit constraints 2023-06-16 08:20:26 -04:00
Amir Alavi
5f0edb5f93 removepodsviolatingtopologyspreadconstraint: topologyBalanceNodeFit to control whether to perform nodefit when balacning domains 2023-06-16 07:56:32 -04:00
Amir Alavi
f5a7f716b3 use pod informers for listing pods in removepodsviolatingtopologyspreadconstraint and removepodsviolatinginterpodantiaffinity (#1163)
* use pod informers for listing pods in removepodsviolatingtopologyspreadconstraint and removepodsviolatinginterpodantiaffinity

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>

* workaround in topologyspreadconstraint test to ensure that informer's index returns pods sorted by name

---------

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2023-06-15 23:30:19 -07:00
Kubernetes Prow Robot
5163ac3ace Merge pull request #1173 from jongwooo/chore/replace-deprecated-command-with-environment-file
Replace deprecated command with environment file
2023-06-15 06:52:19 -07:00
Jan Chaloupka
4c272e6ea2 FakeClientset: bump watch channel size
Huge clusters with thousands of pods can quickly exceed the default
watch channel size of the fake clientset. Causing the channel
to panic with "channel full".
2023-06-15 11:07:09 +02:00
Jongwoo Han
c637b7f0e6 Replace deprecated command with environment file 2023-06-15 12:12:44 +09:00
Kubernetes Prow Robot
5462a599af Merge pull request #1165 from a7i/toomanyrestarts-CrashLoopBackOff
TooManyRestart: state filter for CrashLoopBackOff
2023-06-13 09:47:59 -07:00
Kubernetes Prow Robot
7dcd7fa50f Merge pull request #1166 from a7i/pause-image
update pause image from 'kubernetes/pause' to 'registry.k8s.io/pause'
2023-06-08 01:46:13 -07:00
Amir Alavi
6271d51125 update pause image from 'kubernetes/pause' to 'registry.k8s.io/pause' 2023-06-07 23:32:31 -04:00
Amir Alavi
0bdbf51eb2 Move CrashLoopBackOff container state from PodLifeTime to TooManyRestarts plugin 2023-06-07 23:29:11 -04:00
Amir Alavi
9aad51f328 Revert "Merge pull request #1164 from a7i/podlifetime-CrashLoopBackOff"
This reverts commit 699297711a, reversing
changes made to 877d9b18ee.
2023-06-07 21:00:47 -04:00
Kubernetes Prow Robot
699297711a Merge pull request #1164 from a7i/podlifetime-CrashLoopBackOff
PodLifeTime: support CrashLoopBackOff container state
2023-06-07 10:46:13 -07:00
Amir Alavi
1b976529bc PodLifeTime: support CrashLoopBackOff container state 2023-06-06 18:21:43 -04:00
10hin
877d9b18ee pod anti-affinity check among nodes (#1033)
* pod anti-affinity check among nodes

* avoid pod equality check with UID field

also add node equality check with Name for short-cut

* add test case for anti-affinity violation among different node

* reduce ListPodsOnANode call

* fix old code

* apply gofumpt -w -extra

move klog/v2 import entry to bottom according to master code
2023-06-05 05:21:25 -07:00
Nicolas Lamirault
b58ec3b458 Custom labels for ServiceMonitor resource (#1147)
* Add: Custom labels for ServiceMonitor resource

Signed-off-by: Nicolas Lamirault <nicolas.lamirault@gmail.com>

* Fix: indentation

Signed-off-by: Nicolas Lamirault <nicolas.lamirault@gmail.com>

---------

Signed-off-by: Nicolas Lamirault <nicolas.lamirault@gmail.com>
2023-05-31 06:41:46 -07:00
Kubernetes Prow Robot
e94a6f129a Merge pull request #1160 from a7i/chart-v0.27.1
bump chart to v0.27.1
2023-05-31 01:29:46 -07:00
Kubernetes Prow Robot
1b22e102ce Merge pull request #1159 from mikutas/missing-header
docs: supplement missing link
2023-05-30 17:47:45 -07:00
Amir Alavi
a35e3f49b4 bump chart to v0.27.1 2023-05-30 09:19:11 -04:00
mikutas
9313041d77 docs: supplement missing link 2023-05-30 17:09:47 +09:00
Kubernetes Prow Robot
83c81e6b58 Merge pull request #1150 from a7i/fix-imagepullsecrets-indent
fix: imagepullsecrets indentation for kind: Deployment
2023-05-25 18:24:51 -07:00
Kubernetes Prow Robot
eb356fe6ff Merge pull request #1151 from a7i/helm-arg-cmd
helm: ability to override command and args. set args inline
2023-05-25 00:12:51 -07:00
Amir Alavi
a62bb54a3a helm: ability to override command and args. set args inline 2023-05-16 13:02:09 -04:00
Amir Alavi
92017d0fdd fix: imagepullsecrets indentation for kind: Deployment 2023-05-16 12:48:05 -04:00
Kubernetes Prow Robot
476bee3424 Merge pull request #1146 from a7i/generic-sets
update deprecated sets.String to generic sets
2023-05-12 06:29:02 -07:00
Kubernetes Prow Robot
b630ecaaf7 Merge pull request #1145 from ratnopamc/deprecate-k8s-release-url
Use dl.k8s.io instead of kubernetes-release bucket
2023-05-11 19:50:46 -07:00
Amir Alavi
359b38a34c update deprecated sets.String to generic sets 2023-05-11 22:35:44 -04:00
Ratnopam Chakrabarti
2c525afc76 Use dl.k8s.io instead of kubernetes-release bucket 2023-05-11 15:30:26 +00:00
Amir Alavi
e2826fa66c fix plugin arg conversion when using multiple profiles with same plugin (#1143)
* fix plugin arg conversion when using multiple profiles with same plugin

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>

* PR feedback to refactor validateDeschedulerConfiguration error handling

---------

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2023-05-11 08:25:03 -07:00
Kubernetes Prow Robot
0f5a5a2235 Merge pull request #1096 from a7i/version-compatibility-test
add unit tests for version compatibility check
2023-05-09 01:38:55 -07:00
Amir Alavi
1f6a736aab add unit tests for version compatibility check 2023-05-05 14:57:43 -04:00
Kubernetes Prow Robot
8cbbe5501b Merge pull request #1137 from a7i/gha-1.27
update helm github action workflow to v0.27.0
2023-05-05 08:25:13 -07:00
Amir Alavi
dd63aac88c update helm github action workflow 2023-05-05 10:11:41 -04:00
1080 changed files with 86171 additions and 22418 deletions

.gitattributes

@@ -0,0 +1,6 @@
# Always check-out / check-in files with LF line endings.
* text=auto eol=lf
go.sum merge=union
**/zz_generated.*.go linguist-generated=true
vendor/** linguist-vendored


@@ -35,7 +35,7 @@ jobs:
- uses: actions/setup-go@v3
with:
go-version: '1.20.3'
go-version: '1.20.7'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
@@ -47,7 +47,7 @@ jobs:
run: |
changed=$(ct list-changed --config=.github/ci/ct.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (lint)


@@ -7,8 +7,8 @@ jobs:
deploy:
strategy:
matrix:
k8s-version: ["v1.26.0"]
descheduler-version: ["v0.26.1"]
k8s-version: ["v1.28.0"]
descheduler-version: ["v0.27.1"]
descheduler-api: ["v1alpha1", "v1alpha2"]
manifest: ["deployment"]
runs-on: ubuntu-latest


@@ -22,7 +22,7 @@ jobs:
- name: Install Helm
uses: azure/setup-helm@v1
with:
version: v3.4.0
version: v3.7.0
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.1.0


@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.3
FROM golang:1.20.7
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .


@@ -43,6 +43,10 @@ IMAGE:=descheduler:$(VERSION)
# IMAGE_GCLOUD is the image name of descheduler in the remote registry
IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
# CURRENT_DIR is the current dir where the Makefile exists
CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
# TODO: upload binaries to GCS bucket
#
# In the future binaries can be uploaded to
@@ -129,6 +133,9 @@ gen:
./hack/update-generated-defaulters.sh
./hack/update-docs.sh
gen-docker:
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:1.20.7 gen
verify-gen:
./hack/verify-conversions.sh
./hack/verify-deep-copies.sh


@@ -38,6 +38,7 @@ that version's release branch, as listed below:
|Descheduler Version|Docs link|
|---|---|
|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|
|v0.27.x|[`release-1.27`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.27/README.md)|
|v0.26.x|[`release-1.26`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.26/README.md)|
|v0.25.x|[`release-1.25`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.25/README.md)|
@@ -112,7 +113,7 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.
## Policy, Default Evictor and Strategy plugins
**⚠️ v1alpha1 configuration is still suported, but deprecated (and soon will be removed). Please consider migrating to v1alpha2 (described bellow). For previous v1alpha1 documentation go to [docs/deprecated/v1alpha1.md](docs/deprecated/v1alpha1.md) ⚠️**
**⚠️ v1alpha1 configuration is still supported, but deprecated (and soon will be removed). Please consider migrating to v1alpha2 (described bellow). For previous v1alpha1 documentation go to [docs/deprecated/v1alpha1.md](docs/deprecated/v1alpha1.md) ⚠️**
The Descheduler Policy is configurable and includes default strategy plugins that can be enabled or disabled. It includes a common eviction configuration at the top level, as well as configuration from the Evictor plugin (Default Evictor, if not specified otherwise). Top-level configuration and Evictor plugin configuration are applied to all evictions.
@@ -201,6 +202,7 @@ Balance Plugins: These plugins process all pods, or groups of pods, and determin
| [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity) |Deschedule|Evicts pods violating node affinity|
| [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints) |Deschedule|Evicts pods violating node taints|
| [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint) |Balance|Evicts pods violating TopologySpreadConstraints|
| [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts) |Deschedule|Evicts pods having too many restarts|
| [PodLifeTime](#podlifetime) |Deschedule|Evicts pods that have exceeded a specified age limit|
| [RemoveFailedPods](#removefailedpods) |Deschedule|Evicts pods with certain failed reasons|
@@ -435,7 +437,10 @@ profiles:
This strategy makes sure all pods violating
[node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
are eventually removed from nodes. Node affinity rules allow a pod to specify
`requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
`requiredDuringSchedulingIgnoredDuringExecution` and/or
`preferredDuringSchedulingIgnoredDuringExecution`.
The `requiredDuringSchedulingIgnoredDuringExecution` type tells the scheduler
to respect node affinity when scheduling the pod but kubelet to ignore
in case node changes over time and no longer respects the affinity.
When enabled, the strategy serves as a temporary implementation
@@ -448,6 +453,14 @@ of scheduling. Over time nodeA stops to satisfy the rule. When the strategy gets
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.
The `preferredDuringSchedulingIgnoredDuringExecution` type tells the scheduler
to respect node affinity when scheduling if that's possible. If not, the pod
gets scheduled anyway. It may happen that, over time, the state of the cluster
changes and now the pod can be scheduled on a node that actually fits its
preferred node affinity. When enabled, the strategy serves as a temporary
implementation of `preferredDuringSchedulingPreferredDuringExecution`, so the
pod will be evicted if it can be scheduled on a "better" node.
**Parameters:**
|Name|Type|
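A minimal policy sketch for the preferred case could look like the following (this assumes the value is passed through the plugin's `nodeAffinityType` list arg, as is done for the required type):

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "RemovePodsViolatingNodeAffinity"
      args:
        nodeAffinityType:
        - "preferredDuringSchedulingIgnoredDuringExecution"
    plugins:
      deschedule:
        enabled:
          - "RemovePodsViolatingNodeAffinity"
```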
@@ -521,8 +534,17 @@ This strategy makes sure that pods violating [topology spread constraints](https
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.
By default, this strategy only deals with hard constraints, setting parameter `includeSoftConstraints` to `true` will
include soft constraints.
By default, this strategy only includes hard constraints, you can explicitly set `constraints` as shown below to include both:
```yaml
constraints:
- DoNotSchedule
- ScheduleAnyway
```
The `topologyBalanceNodeFit` arg is used when balancing topology domains while the Default Evictor's `nodeFit` is used in pre-eviction to determine if a pod can be evicted.
```yaml
topologyBalanceNodeFit: false
```
Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.
@@ -530,9 +552,10 @@ Strategy parameter `labelSelector` is not utilized when balancing topology domai
|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`constraints`|(see [whenUnsatisfiable](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core))||
|`topologyBalanceNodeFit`|bool|default `true`. [node fit filtering](#node-fit-filtering) when balancing topology domains|
**Example:**
@@ -544,7 +567,8 @@ profiles:
pluginConfig:
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
includeSoftConstraints: false
constraints:
- DoNotSchedule
plugins:
balance:
enabled:
@@ -560,6 +584,13 @@ include `podRestartThreshold`, which is the number of restarts (summed over all
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.
You can also specify `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) of: `CrashLoopBackOff`
If a value for `states` or `podStatusPhases` is not specified,
Pods in any state (even `Running`) are considered for eviction.
**Parameters:**
|Name|Type|
@@ -568,6 +599,7 @@ into that calculation.
|`includingInitContainers`|bool|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`states`|list(string)|Only supported in v0.28+|
**Example:**


@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.27.0
appVersion: 0.27.0
version: 0.27.1
appVersion: 0.27.1
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes


@@ -75,6 +75,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
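For example, the new `serviceMonitor.additionalLabels` value can be set in `values.yaml` along these lines (the `prometheus: kube-prometheus-stack` label mirrors the commented example in the chart's default values and is only illustrative):

```yaml
serviceMonitor:
  enabled: true
  additionalLabels:
    prometheus: kube-prometheus-stack   # label your Prometheus instance selects on
```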


@@ -66,15 +66,11 @@ spec:
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/bin/descheduler"
{{- toYaml .Values.command | nindent 16 }}
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- --policy-config-file: "/policy-dir/policy.yaml"
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}


@@ -37,24 +37,19 @@ spec:
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- toYaml . | nindent 6 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/bin/descheduler"
{{- toYaml .Values.command | nindent 12 }}
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
{{- end }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
ports:
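
As a sketch of what the deployment template above renders with the new inline flag format (the `5m` interval and the extra `dry-run` entry are illustrative values; `v: 3` is the chart default):

```yaml
# values.yaml
deschedulingInterval: 5m
cmdOptions:
  v: 3
  dry-run:                 # empty value renders as a bare flag
```

yields container args roughly like:

```yaml
args:
  - --policy-config-file=/policy-dir/policy.yaml
  - --descheduling-interval=5m
  - --dry-run
  - --v=3
```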


@@ -7,6 +7,9 @@ metadata:
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
jobLabel: jobLabel
namespaceSelector:


@@ -67,6 +67,9 @@ leaderElection: {}
# resourceName: "descheduler"
# resourceNamescape: "kube-system"
command:
- "/bin/descheduler"
cmdOptions:
v: 3
@@ -79,6 +82,13 @@ deschedulerPolicy:
# maxNoOfPodsToEvictPerNamespace: 10
# ignorePvcPods: true
# evictLocalStoragePods: true
# tracing:
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
# transportCert: ""
# serviceName: ""
# serviceNamespace: ""
# sampleRate: 1.0
# fallbackToNoOpProviderOnError: true
strategies:
RemoveDuplicates:
enabled: true
@@ -177,6 +187,9 @@ serviceMonitor:
enabled: false
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
# Add custom labels to the ServiceMonitor resource
additionalLabels: {}
# prometheus: kube-prometheus-stack
interval: ""
# honorLabels: true
insecureSkipVerify: true


@@ -29,6 +29,7 @@ import (
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
"sigs.k8s.io/descheduler/pkg/tracing"
)
const (
@@ -74,7 +75,9 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
},
}
deschedulerscheme.Scheme.Default(&versionedCfg)
cfg := componentconfig.DeschedulerConfiguration{}
cfg := componentconfig.DeschedulerConfiguration{
Tracing: componentconfig.TracingConfiguration{},
}
if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
return nil, err
}
@@ -92,6 +95,12 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "Execute descheduler in dry run mode.")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
fs.StringVar(&rs.Tracing.CollectorEndpoint, "otel-collector-endpoint", "", "Set this flag to the OpenTelemetry Collector Service Address")
fs.StringVar(&rs.Tracing.TransportCert, "otel-transport-ca-cert", "", "Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode")
fs.StringVar(&rs.Tracing.ServiceName, "otel-service-name", tracing.DefaultServiceName, "OTEL Trace name to be used with the resources")
fs.StringVar(&rs.Tracing.ServiceNamespace, "otel-trace-namespace", "", "OTEL Trace namespace to be used with the resources")
fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)


@@ -28,9 +28,11 @@ import (
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/tracing"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/watch"
apiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
restclient "k8s.io/client-go/rest"
@@ -111,6 +113,14 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
}
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
if err != nil {
return err
}
defer tracing.Shutdown(ctx)
// increase the fake watch channel so the dry-run mode can be run
// over a cluster with thousands of pods
watch.DefaultChanSize = 100000
return descheduler.Run(ctx, rs)
}


@@ -13,7 +13,7 @@ descheduler [flags]
### Options
```
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces and IP address families will be used. (default 0.0.0.0)
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
--client-connection-burst int32 Burst to use for interacting with kubernetes apiserver.
--client-connection-kubeconfig string File path to kube configuration for interacting with kubernetes apiserver.
@@ -32,6 +32,12 @@ descheduler [flags]
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
--logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
--otel-collector-endpoint string Set this flag to the OpenTelemetry Collector Service Address
--otel-fallback-no-op-on-error Fallback to NoOp Tracer in case of error
--otel-sample-rate float Sample rate to collect the Traces (default 1)
--otel-service-name string OTEL Trace name to be used with the resources (default "descheduler")
--otel-trace-namespace string OTEL Trace namespace to be used with the resources
--otel-transport-ca-cert string Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
--policy-config-file string File with descheduler policy configuration.


@@ -109,17 +109,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.27.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.28.0' | kubectl apply -f -
```
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.27.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.28.0' | kubectl apply -f -
```
Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.27.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.28.0' | kubectl apply -f -
```
## User Guide


@@ -4,6 +4,8 @@ Starting with descheduler release v0.10.0 container images are available in the
Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
v0.27.1 | registry.k8s.io/descheduler/descheduler:v0.27.1 | AMD64<br>ARM64<br>ARMv7 |
v0.27.0 | registry.k8s.io/descheduler/descheduler:v0.27.0 | AMD64<br>ARM64<br>ARMv7 |
v0.26.1 | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
v0.26.0 | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |


@@ -21,7 +21,9 @@ profiles:
includingInitContainers: true
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
includeSoftConstraints: true
constraints:
- DoNotSchedule
- ScheduleAnyway
plugins:
deschedule:
enabled:


@@ -0,0 +1,15 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemovePodsHavingTooManyRestarts"
args:
podRestartThreshold: 100
includingInitContainers: true
states:
- CrashLoopBackOff
plugins:
deschedule:
enabled:
- "RemovePodsHavingTooManyRestarts"


@@ -5,7 +5,9 @@ profiles:
pluginConfig:
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints
constraints:
- DoNotSchedule
- ScheduleAnyway
plugins:
balance:
enabled:

go.mod

@@ -5,109 +5,111 @@ go 1.20
require (
github.com/client9/misspell v0.3.4
github.com/google/go-cmp v0.5.9
github.com/spf13/cobra v1.6.0
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
k8s.io/api v0.27.0
k8s.io/apimachinery v0.27.0
k8s.io/apiserver v0.27.0
k8s.io/client-go v0.27.0
k8s.io/code-generator v0.27.0
k8s.io/component-base v0.27.0
k8s.io/component-helpers v0.27.0
k8s.io/klog/v2 v2.90.1
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
go.opentelemetry.io/otel v1.10.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0
go.opentelemetry.io/otel/sdk v1.10.0
go.opentelemetry.io/otel/trace v1.10.0
google.golang.org/grpc v1.54.0
k8s.io/api v0.28.0
k8s.io/apimachinery v0.28.0
k8s.io/apiserver v0.28.0
k8s.io/client-go v0.28.0
k8s.io/code-generator v0.28.0
k8s.io/component-base v0.28.0
k8s.io/component-helpers v0.28.0
k8s.io/klog/v2 v2.100.1
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2
sigs.k8s.io/mdtoc v1.1.0
)
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.4.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
github.com/google/cel-go v0.12.6 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/cel-go v0.16.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/v3 v3.5.7 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
go.opentelemetry.io/otel/metric v0.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
go.opentelemetry.io/otel/trace v1.10.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.7.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.13.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.8.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
k8s.io/kms v0.27.0 // indirect
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 // indirect
k8s.io/kms v0.28.0 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect

go.sum

@@ -13,13 +13,15 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -38,31 +40,24 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -74,20 +69,18 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -96,10 +89,9 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
@@ -108,36 +100,26 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -176,10 +158,10 @@ github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M=
github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/cel-go v0.16.0 h1:DG9YQ8nFCFXAs/FDDwBxmL1tpKNrdlGUM9U3537bX/Y=
github.com/google/cel-go v0.16.0/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -187,14 +169,14 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -224,135 +206,96 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI=
github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU=
go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0=
go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc=
go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo=
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ=
go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI=
go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -379,23 +322,24 @@ go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/A
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -406,6 +350,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -426,11 +372,10 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -438,7 +383,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -458,20 +402,16 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -481,16 +421,12 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -499,7 +435,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -512,42 +447,30 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -592,8 +515,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -652,10 +575,13 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M=
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -672,9 +598,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -688,10 +613,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -700,19 +623,14 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -723,36 +641,36 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.27.0 h1:2owttiA8Oa+J3idFeq8TSnNpm4y6AOGPI3PDbIpp2cE=
k8s.io/api v0.27.0/go.mod h1:Wl+QRvQlh+T8SK5f4F6YBhhyH6hrFO08nl74xZb1MUE=
k8s.io/apimachinery v0.27.0 h1:vEyy/PVMbPMCPutrssCVHCf0JNZ0Px+YqPi82K2ALlk=
k8s.io/apimachinery v0.27.0/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM=
k8s.io/apiserver v0.27.0 h1:sXt/2yVMebZef6GqJHs4IYHSdSYwwrJCafBV/KSCwDw=
k8s.io/apiserver v0.27.0/go.mod h1:8heEJ5f6EqiKwXC3Ez3ikgOvGtRSEQG/SQZkhO9UzIg=
k8s.io/client-go v0.27.0 h1:DyZS1fJkv73tEy7rWv4VF6NwGeJ7SKvNaLRXZBYLA+4=
k8s.io/client-go v0.27.0/go.mod h1:XVEmpNnM+4JYO3EENoFV/ZDv3KxKVJUnzGo70avk+C4=
k8s.io/code-generator v0.27.0 h1:XQsGgUKnxuaPNr5V+Bg/AUmbvfhVu1jfxTW8PpQyGs0=
k8s.io/code-generator v0.27.0/go.mod h1:iWtpm0ZMG6Gc4daWfITDSIu+WFhFJArYDhj242zcbnY=
k8s.io/component-base v0.27.0 h1:g3/FkscH8Uqg9SiDCEfhfhTVwKiVo4T2+iBwUqiFkMg=
k8s.io/component-base v0.27.0/go.mod h1:PXyBQd/vYYjqqGB83rnsHffTTG6zlmxZAd0ZSOu6evk=
k8s.io/component-helpers v0.27.0 h1:rymQGJc4s30hHeb5VGuPdht8gKIPecj+Bw2FOJSavE4=
k8s.io/component-helpers v0.27.0/go.mod h1:vMjVwym/Y0BVyNvg8a4Et2vyPJAh/JhBM0OTRAt0Ceg=
k8s.io/api v0.28.0 h1:3j3VPWmN9tTDI68NETBWlDiA9qOiGJ7sdKeufehBYsM=
k8s.io/api v0.28.0/go.mod h1:0l8NZJzB0i/etuWnIXcwfIv+xnDOhL3lLW919AWYDuY=
k8s.io/apimachinery v0.28.0 h1:ScHS2AG16UlYWk63r46oU3D5y54T53cVI5mMJwwqFNA=
k8s.io/apimachinery v0.28.0/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw=
k8s.io/apiserver v0.28.0 h1:wVh7bK6Xj7hq+5ntInysTeQRAOqqFoKGUOW2yj8DXrY=
k8s.io/apiserver v0.28.0/go.mod h1:MvLmtxhQ0Tb1SZk4hfJBjs8iqr5nhYeaFSaoEcz7Lk4=
k8s.io/client-go v0.28.0 h1:ebcPRDZsCjpj62+cMk1eGNX1QkMdRmQ6lmz5BLoFWeM=
k8s.io/client-go v0.28.0/go.mod h1:0Asy9Xt3U98RypWJmU1ZrRAGKhP6NqDPmptlAzK2kMc=
k8s.io/code-generator v0.28.0 h1:msdkRVJNVFgdiIJ8REl/d3cZsMB9HByFcWMmn13NyuE=
k8s.io/code-generator v0.28.0/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A=
k8s.io/component-base v0.28.0 h1:HQKy1enJrOeJlTlN4a6dU09wtmXaUvThC0irImfqyxI=
k8s.io/component-base v0.28.0/go.mod h1:Yyf3+ZypLfMydVzuLBqJ5V7Kx6WwDr/5cN+dFjw1FNk=
k8s.io/component-helpers v0.28.0 h1:ubHUiEF7H/DOx4471pHHsLlH3EGu8jlEvnld5PS4KdI=
k8s.io/component-helpers v0.28.0/go.mod h1:i7hJ/oFhZImqUWwjLFG/yGkLpJ3KFoirY2DLYIMql6Q=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.27.0 h1:adCotKQybOjxwbxW7ogXyv8uQGan/3Y126S2aNW4YFY=
k8s.io/kms v0.27.0/go.mod h1:vI2R4Nhw+PZ+DYtVPVYKsIqip2IYjZWK9bESR64WdIw=
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg=
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY=
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY=
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.28.0 h1:BwJhU9qPcJhHLUcQjtelOSjYti+1/caJLr+4jHbKzTA=
k8s.io/kms v0.28.0/go.mod h1:CNU792ls92v2Ye7Vn1jn+xLqYtUSezDZNVu6PLbJyrU=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 h1:MB1zkK+WMOmfLxEpjr1wEmkpcIhZC7kfTkZ0stg5bog=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1/go.mod h1:/4NLd21PQY0B+H+X0aDZdwUiVXYJQl/2NXA5KVtDiP4=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=


@@ -13,6 +13,7 @@ data:
pluginConfig:
- name: "DefaultEvictor"
- name: "RemovePodsViolatingInterPodAntiAffinity"
- name: "RemoveDuplicates"
- name: "LowNodeUtilization"
args:
thresholds:
@@ -30,4 +31,4 @@ data:
- "RemoveDuplicates"
deschedule:
enabled:
- "RemovePodsViolatingInterPodAntiAffinity"
- "RemovePodsViolatingInterPodAntiAffinity"


@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.27.0
image: registry.k8s.io/descheduler/descheduler:v0.27.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume


@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.27.0
image: registry.k8s.io/descheduler/descheduler:v0.27.1
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"


@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.27.0
image: registry.k8s.io/descheduler/descheduler:v0.27.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume


@@ -19,7 +19,9 @@ package v1alpha1
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
@@ -170,10 +172,15 @@ var StrategyParamsToPluginArgs = map[string]func(params *StrategyParameters) (*a
}, nil
},
"RemovePodsViolatingTopologySpreadConstraint": func(params *StrategyParameters) (*api.PluginConfig, error) {
constraints := []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule}
if params.IncludeSoftConstraints {
constraints = append(constraints, v1.ScheduleAnyway)
}
args := &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
IncludeSoftConstraints: params.IncludeSoftConstraints,
Constraints: constraints,
TopologyBalanceNodeFit: utilpointer.Bool(true),
}
if err := removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)


@@ -21,6 +21,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
@@ -567,13 +568,28 @@ func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t
result: &api.PluginConfig{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
IncludeSoftConstraints: true,
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
TopologyBalanceNodeFit: utilpointer.Bool(true),
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "params without soft constraints",
params: &StrategyParameters{
IncludeSoftConstraints: false,
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{


@@ -88,7 +88,7 @@ func convertToInternalPluginConfigArgs(out *api.DeschedulerPolicy) error {
func Convert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if _, ok := pluginregistry.PluginRegistry[in.Name]; ok {
out.Args = pluginregistry.PluginRegistry[in.Name].PluginArgInstance
out.Args = pluginregistry.PluginRegistry[in.Name].PluginArgInstance.DeepCopyObject()
if in.Args.Raw != nil {
_, _, err := Codecs.UniversalDecoder().Decode(in.Args.Raw, nil, out.Args)
if err != nil {
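Reading the hunk above: assigning the registry's PluginArgInstance directly would hand every decoded PluginConfig the same shared object, which the DeepCopyObject() call avoids. A minimal, self-contained sketch of that aliasing hazard; the Args type and DeepCopy helper here are hypothetical stand-ins, not the descheduler's API.

package main

import "fmt"

// Args stands in for a plugin's argument object; the real registry stores a
// runtime.Object prototype per plugin (names here are hypothetical).
type Args struct{ Threshold int }

func (a *Args) DeepCopy() *Args { c := *a; return &c }

func main() {
	prototype := &Args{Threshold: 10} // shared registry instance

	// Without a copy, both "profiles" decode into the same object.
	shared1, shared2 := prototype, prototype
	shared1.Threshold = 20
	fmt.Println(shared2.Threshold) // 20 – the second profile sees the first profile's args

	// With a deep copy per PluginConfig, each profile gets its own instance.
	prototype.Threshold = 10
	copy1, copy2 := prototype.DeepCopy(), prototype.DeepCopy()
	copy1.Threshold = 20
	fmt.Println(copy2.Threshold) // 10 – isolated, which is what DeepCopyObject() provides above
}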


@@ -55,6 +55,9 @@ type DeschedulerConfiguration struct {
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool
// Tracing specifies the options for tracing.
Tracing TracingConfiguration
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration
@@ -66,3 +69,25 @@ type DeschedulerConfiguration struct {
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration
}
type TracingConfiguration struct {
// CollectorEndpoint is the address of the OpenTelemetry collector.
// If not specified, the NoopTraceProvider will be used.
CollectorEndpoint string
// TransportCert is the path to the certificate file for the OpenTelemetry collector.
// If not specified, the provider will start in insecure mode.
TransportCert string
// ServiceName is the name of the service to be used in the OpenTelemetry collector.
// If not specified, the default value is "descheduler".
ServiceName string
// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
// If not specified, the default namespace will be used.
ServiceNamespace string
// SampleRate configures the sample rate of the OTEL trace collection. A value >= 1.0 samples
// everything, a value < 0 samples nothing, and anything in between is used as the sampling ratio.
SampleRate float64
// FallbackToNoOpProviderOnError can be set to make the trace provider fall back to a no-op
// provider when the configured endpoint-based provider cannot be set up.
FallbackToNoOpProviderOnError bool
}
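The SampleRate semantics documented above map naturally onto OpenTelemetry's built-in samplers. As a rough sketch only (this is not the descheduler's actual pkg/tracing code, and the real provider setup wires in more options), the value could be translated like this using go.opentelemetry.io/otel/sdk/trace:

package main

import (
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// samplerFor maps a SampleRate value onto an OTEL sampler following the
// documented semantics: >= 1.0 samples everything, < 0 samples nothing,
// anything in between is used as a trace-ID ratio.
// Sketch only: the descheduler's real tracing setup may differ.
func samplerFor(sampleRate float64) sdktrace.Sampler {
	switch {
	case sampleRate >= 1.0:
		return sdktrace.AlwaysSample()
	case sampleRate < 0:
		return sdktrace.NeverSample()
	default:
		return sdktrace.TraceIDRatioBased(sampleRate)
	}
}

func main() {
	fmt.Println(samplerFor(0.25).Description()) // TraceIDRatioBased{0.25}
}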


@@ -55,6 +55,9 @@ type DeschedulerConfiguration struct {
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
// Tracing is used to set up the required OTEL tracing configuration
Tracing TracingConfiguration `json:"tracing,omitempty"`
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
@@ -66,3 +69,25 @@ type DeschedulerConfiguration struct {
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration `json:"clientConnection,omitempty"`
}
type TracingConfiguration struct {
// CollectorEndpoint is the address of the OpenTelemetry collector.
// If not specified, the NoopTraceProvider will be used.
CollectorEndpoint string `json:"collectorEndpoint"`
// TransportCert is the path to the certificate file for the OpenTelemetry collector.
// If not specified, the provider will start in insecure mode.
TransportCert string `json:"transportCert,omitempty"`
// ServiceName is the name of the service to be used in the OpenTelemetry collector.
// If not specified, the default value is "descheduler".
ServiceName string `json:"serviceName,omitempty"`
// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
// If not specified, the default namespace will be used.
ServiceNamespace string `json:"serviceNamespace,omitempty"`
// SampleRate configures the sample rate of the OTEL trace collection. A value >= 1.0 samples
// everything, a value < 0 samples nothing, and anything in between is used as the sampling ratio.
SampleRate float64 `json:"sampleRate"`
// FallbackToNoOpProviderOnError can be set to make the trace provider fall back to a no-op
// provider when the configured endpoint-based provider cannot be set up.
FallbackToNoOpProviderOnError bool `json:"fallbackToNoOpProviderOnError"`
}
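Given the json tags above, the tracing section of a v1alpha1 configuration file serializes with lower-camel-case keys. A small standalone sketch with made-up values; the local struct simply mirrors the tags so the example compiles on its own and is not the descheduler's own type.

package main

import (
	"encoding/json"
	"fmt"
)

// tracingConfig mirrors the json tags of the v1alpha1 TracingConfiguration above;
// it is redeclared locally so the sketch stays self-contained.
type tracingConfig struct {
	CollectorEndpoint             string  `json:"collectorEndpoint"`
	TransportCert                 string  `json:"transportCert,omitempty"`
	ServiceName                   string  `json:"serviceName,omitempty"`
	ServiceNamespace              string  `json:"serviceNamespace,omitempty"`
	SampleRate                    float64 `json:"sampleRate"`
	FallbackToNoOpProviderOnError bool    `json:"fallbackToNoOpProviderOnError"`
}

func main() {
	// Hypothetical values, only to show the field names a config file would use.
	cfg := tracingConfig{
		CollectorEndpoint: "otel-collector.observability.svc:4317",
		ServiceName:       "descheduler",
		SampleRate:        0.1,
	}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}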


@@ -46,6 +46,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*componentconfig.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(a.(*TracingConfiguration), b.(*componentconfig.TracingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*componentconfig.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(a.(*componentconfig.TracingConfiguration), b.(*TracingConfiguration), scope)
}); err != nil {
return err
}
return nil
}
@@ -58,6 +68,9 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err
}
out.LeaderElection = in.LeaderElection
out.Logging = in.Logging
out.ClientConnection = in.ClientConnection
@@ -78,6 +91,9 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err
}
out.LeaderElection = in.LeaderElection
out.Logging = in.Logging
out.ClientConnection = in.ClientConnection
@@ -88,3 +104,33 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
func Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
return autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in, out, s)
}
func autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
out.CollectorEndpoint = in.CollectorEndpoint
out.TransportCert = in.TransportCert
out.ServiceName = in.ServiceName
out.ServiceNamespace = in.ServiceNamespace
out.SampleRate = in.SampleRate
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
return nil
}
// Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in, out, s)
}
func autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
out.CollectorEndpoint = in.CollectorEndpoint
out.TransportCert = in.TransportCert
out.ServiceName = in.ServiceName
out.ServiceNamespace = in.ServiceNamespace
out.SampleRate = in.SampleRate
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
return nil
}
// Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration is an autogenerated conversion function.
func Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
return autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in, out, s)
}


@@ -29,6 +29,7 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.Tracing = in.Tracing
out.LeaderElection = in.LeaderElection
in.Logging.DeepCopyInto(&out.Logging)
out.ClientConnection = in.ClientConnection
@@ -52,3 +53,19 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
if in == nil {
return nil
}
out := new(TracingConfiguration)
in.DeepCopyInto(out)
return out
}


@@ -29,6 +29,7 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.Tracing = in.Tracing
out.LeaderElection = in.LeaderElection
in.Logging.DeepCopyInto(&out.Logging)
out.ClientConnection = in.ClientConnection
@@ -52,3 +53,19 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
if in == nil {
return nil
}
out := new(TracingConfiguration)
in.DeepCopyInto(out)
return out
}

View File

@@ -18,42 +18,209 @@ package descheduler
import (
"context"
"errors"
"fmt"
"math"
"strconv"
"strings"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"k8s.io/client-go/discovery"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/events"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
listersv1 "k8s.io/client-go/listers/core/v1"
schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/tracing"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/pkg/version"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/pkg/version"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
type profileRunner struct {
name string
descheduleEPs, balanceEPs eprunner
}
type descheduler struct {
rs *options.DeschedulerServer
podLister listersv1.PodLister
nodeLister listersv1.NodeLister
namespaceLister listersv1.NamespaceLister
priorityClassLister schedulingv1.PriorityClassLister
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
evictionPolicyGroupVersion string
deschedulerPolicy *api.DeschedulerPolicy
eventRecorder events.EventRecorder
}
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
}
return &descheduler{
rs: rs,
podLister: podLister,
nodeLister: nodeLister,
namespaceLister: namespaceLister,
priorityClassLister: priorityClassLister,
getPodsAssignedToNode: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
evictionPolicyGroupVersion: evictionPolicyGroupVersion,
deschedulerPolicy: deschedulerPolicy,
eventRecorder: eventRecorder,
}, nil
}
func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runDeschedulerLoop")
defer span.End()
defer func(loopStartDuration time.Time) {
metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
}(time.Now())
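// The metric is recorded through a deferred closure: time.Now() is captured when the
// defer statement executes, while time.Since only runs when the closure fires at the end
// of the loop. Deferring Observe directly would evaluate its time.Since argument
// immediately and record a near-zero duration.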
// if there is still only zero or one node, error out
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
return fmt.Errorf("the cluster size is 0 or 1")
}
var client clientset.Interface
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client,
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
if d.rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(d.rs.Client, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
if err != nil {
return err
}
// create a new instance of the shared informer factory from the cached client
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
// register the pod informer, otherwise it will not be started
d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
client = fakeClient
d.sharedInformerFactory = fakeSharedInformerFactory
} else {
client = d.rs.Client
}
klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
client,
d.evictionPolicyGroupVersion,
d.rs.DryRun,
d.deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
d.deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
!d.rs.DisableMetrics,
d.eventRecorder,
)
d.runProfiles(ctx, client, nodes, podEvictor)
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
return nil
}
// runProfiles runs all the deschedule plugins of all profiles and
// later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
// see https://github.com/kubernetes-sigs/descheduler/issues/979
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
defer span.End()
var profileRunners []profileRunner
for _, profile := range d.deschedulerPolicy.Profiles {
currProfile, err := frameworkprofile.NewProfile(
profile,
pluginregistry.PluginRegistry,
frameworkprofile.WithClientSet(client),
frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
frameworkprofile.WithPodEvictor(podEvictor),
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
)
if err != nil {
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
continue
}
profileRunners = append(profileRunners, profileRunner{profile.Name, currProfile.RunDeschedulePlugins, currProfile.RunBalancePlugins})
}
for _, profileR := range profileRunners {
// First deschedule
status := profileR.descheduleEPs(ctx, nodes)
if status != nil && status.Err != nil {
span.AddEvent("failed to perform deschedule operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.DescheduleOperation)))
klog.ErrorS(status.Err, "running deschedule extension point failed with error", "profile", profileR.name)
continue
}
}
for _, profileR := range profileRunners {
// Balance Later
status := profileR.balanceEPs(ctx, nodes)
if status != nil && status.Err != nil {
span.AddEvent("failed to perform balance operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.BalanceOperation)))
klog.ErrorS(status.Err, "running balance extension point failed with error", "profile", profileR.name)
continue
}
}
}
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "Run")
defer span.End()
metrics.Register()
clientConnection := rs.ClientConnection
@@ -76,7 +243,9 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
}
// Add k8s compatibility warnings to logs
versionCompatibilityCheck(rs)
if err := validateVersionCompatibility(rs.Client.Discovery(), version.Get()); err != nil {
klog.Warning(err.Error())
}
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
@@ -88,15 +257,17 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
}
if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
span.AddEvent("Validation Failure", trace.WithAttributes(attribute.String("err", "leaderElection must be used with deschedulingInterval")))
return fmt.Errorf("leaderElection must be used with deschedulingInterval")
}
if rs.LeaderElection.LeaderElect && rs.DryRun {
klog.V(1).InfoS("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
klog.V(1).Info("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
}
if rs.LeaderElection.LeaderElect && !rs.DryRun {
if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
span.AddEvent("Leader Election Failure", trace.WithAttributes(attribute.String("err", err.Error())))
return fmt.Errorf("leaderElection: %w", err)
}
return nil
@@ -105,27 +276,34 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return runFn()
}
func versionCompatibilityCheck(rs *options.DeschedulerServer) {
serverVersion, serverErr := rs.Client.Discovery().ServerVersion()
func validateVersionCompatibility(discovery discovery.DiscoveryInterface, versionInfo version.Info) error {
serverVersion, serverErr := discovery.ServerVersion()
if serverErr != nil {
klog.V(1).InfoS("Warning: Get Kubernetes server version fail")
return
return errors.New("failed to get Kubernetes server version")
}
deschedulerMinorVersion := strings.Split(version.Get().Minor, ".")[0]
deschedulerMinorVersion := strings.Split(versionInfo.Minor, ".")[0]
deschedulerMinorVersionFloat, err := strconv.ParseFloat(deschedulerMinorVersion, 64)
if err != nil {
klog.Warning("Warning: Convert Descheduler minor version to float fail")
return errors.New("failed to convert Descheduler minor version to float")
}
kubernetesMinorVersionFloat, err := strconv.ParseFloat(serverVersion.Minor, 64)
if err != nil {
klog.Warning("Warning: Convert Kubernetes server minor version to float fail")
return errors.New("failed to convert Kubernetes server minor version to float")
}
if math.Abs(deschedulerMinorVersionFloat-kubernetesMinorVersionFloat) > 3 {
klog.Warningf("Warning: Descheduler minor version %v is not supported on your version of Kubernetes %v.%v. See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix", deschedulerMinorVersion, serverVersion.Major, serverVersion.Minor)
return fmt.Errorf(
"descheduler minor version %v is not supported on your version of Kubernetes %v.%v. "+
"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
deschedulerMinorVersion,
serverVersion.Major,
serverVersion.Minor,
)
}
return nil
}
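A hedged worked example of the skew rule above (the values are hypothetical; rs is the DeschedulerServer as in Run): a descheduler at minor version "27.0" talking to a server reporting minor "23" gives |27 - 23| = 4 > 3, so an error is returned, while server minors 24 through 30 would pass.
if err := validateVersionCompatibility(rs.Client.Discovery(), version.Info{Major: "0", Minor: "27.0"}); err != nil {
	klog.Warning(err.Error()) // the caller only logs the incompatibility, as Run does above
}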
func cachedClient(
@@ -205,23 +383,11 @@ func cachedClient(
}
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
defer span.End()
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
@@ -234,103 +400,38 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
} else {
eventClient = rs.Client
}
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()
cycleSharedInformerFactory := sharedInformerFactory
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
if err != nil {
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
return err
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
wait.NonSlidingUntil(func() {
loopStartDuration := time.Now()
defer metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeLister, nodeSelector)
// A next context is created here intentionally to avoid nesting the spans via context.
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
defer sSpan.End()
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, nodeLister, nodeSelector)
if err != nil {
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
cancel()
return
}
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
err = descheduler.runDeschedulerLoop(sCtx, nodes)
if err != nil {
sSpan.AddEvent("Failed to run descheduler loop", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
cancel()
return
}
var client clientset.Interface
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client,
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
if rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(rs.Client, podLister, nodeLister, namespaceLister, priorityClassLister)
if err != nil {
klog.Error(err)
return
}
// create a new instance of the shared informer factory from the cached client
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
// register the pod informer, otherwise it will not be started
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
if err != nil {
klog.Errorf("build get pods assigned to node function error: %v", err)
return
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
client = fakeClient
cycleSharedInformerFactory = fakeSharedInformerFactory
} else {
client = rs.Client
}
klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
client,
evictionPolicyGroupVersion,
rs.DryRun,
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
!rs.DisableMetrics,
eventRecorder,
)
for _, profile := range deschedulerPolicy.Profiles {
currProfile, err := frameworkprofile.NewProfile(
profile,
pluginregistry.PluginRegistry,
frameworkprofile.WithClientSet(client),
frameworkprofile.WithSharedInformerFactory(cycleSharedInformerFactory),
frameworkprofile.WithPodEvictor(podEvictor),
frameworkprofile.WithGetPodsAssignedToNodeFnc(getPodsAssignedToNode),
)
if err != nil {
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
continue
}
// First deschedule
status := currProfile.RunDeschedulePlugins(ctx, nodes)
if status != nil && status.Err != nil {
klog.ErrorS(status.Err, "running deschedule extension point failed with error", "profile", profile.Name)
continue
}
// Then balance
status = currProfile.RunBalancePlugins(ctx, nodes)
if status != nil && status.Err != nil {
klog.ErrorS(status.Err, "running balance extension point failed with error", "profile", profile.Name)
continue
}
}
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
if rs.DeschedulingInterval.Seconds() == 0 {
cancel()

View File

@@ -11,6 +11,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
apiversion "k8s.io/apimachinery/pkg/version"
fakediscovery "k8s.io/client-go/discovery/fake"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
@@ -20,6 +22,7 @@ import (
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
deschedulerversion "sigs.k8s.io/descheduler/pkg/version"
"sigs.k8s.io/descheduler/test"
)
@@ -245,6 +248,61 @@ func TestRootCancelWithNoInterval(t *testing.T) {
}
}
func TestValidateVersionCompatibility(t *testing.T) {
type testCase struct {
name string
deschedulerMinor string
serverMinor string
expectError bool
}
testCases := []testCase{
{
name: "no error when descheduler minor equals to server minor",
deschedulerMinor: "26.0",
serverMinor: "26",
expectError: false,
},
{
name: "no error when descheduler minor is 3 behind server minor",
deschedulerMinor: "23.0",
serverMinor: "26",
expectError: false,
},
{
name: "no error when descheduler minor is 3 ahead of server minor",
deschedulerMinor: "26.0",
serverMinor: "23",
expectError: false,
},
{
name: "error when descheduler minor is 4 behind server minor",
deschedulerMinor: "22.0",
serverMinor: "26",
expectError: true,
},
{
name: "error when descheduler minor is 4 ahead of server minor",
deschedulerMinor: "27.0",
serverMinor: "23",
expectError: true,
},
}
client := fakeclientset.NewSimpleClientset()
fakeDiscovery, _ := client.Discovery().(*fakediscovery.FakeDiscovery)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeDiscovery.FakedServerVersion = &apiversion.Info{Major: "1", Minor: tc.serverMinor}
deschedulerVersion := deschedulerversion.Info{Major: "0", Minor: tc.deschedulerMinor}
err := validateVersionCompatibility(fakeDiscovery, deschedulerVersion)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected version compatibility behavior")
}
})
}
}
func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {

View File

@@ -20,6 +20,8 @@ import (
"context"
"fmt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -30,6 +32,7 @@ import (
"sigs.k8s.io/descheduler/metrics"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/tracing"
)
// nodePodEvictedCount keeps count of pods evicted on node
@@ -113,6 +116,9 @@ type EvictOptions struct {
// EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
defer span.End()
// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
strategy := ""
if ctx.Value("strategyName") != nil {
@@ -124,6 +130,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per node reached")))
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
return false
}
@@ -133,6 +140,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per namespace reached")))
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
return false
}
@@ -140,6 +148,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
if err != nil {
// err is used only for logging purposes
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()

View File

@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
@@ -120,8 +121,7 @@ func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v
}
// Check if the pod can fit on a node based on its requests
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
if !ok {
if ok, reqErrors := fitsRequest(nodeIndexer, pod, node); !ok {
errors = append(errors, reqErrors...)
}
}
@@ -146,13 +146,11 @@ func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
klog.V(4).InfoS("Pod does not fit on node",
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
}
return false
}
@@ -164,13 +162,11 @@ func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod,
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
klog.V(4).InfoS("Pod does not fit on node",
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
}
return false
}
@@ -181,12 +177,11 @@ func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.P
if len(errors) == 0 {
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
return true
} else {
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
for _, err := range errors {
klog.V(4).InfoS(err.Error())
}
}
klog.V(4).InfoS("Pod does not fit on node",
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
return false
}
@@ -294,3 +289,37 @@ func IsBasicResource(name v1.ResourceName) bool {
return false
}
}
// GetNodeWeightGivenPodPreferredAffinity returns the weight
// that the pod gives to a node by analyzing the soft node affinity of that pod
// (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
func GetNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, node *v1.Node) int32 {
totalWeight, err := utils.GetNodeWeightGivenPodPreferredAffinity(pod, node)
if err != nil {
return 0
}
return totalWeight
}
// GetBestNodeWeightGivenPodPreferredAffinity returns the best weight
// (maximum one) that the pod gives to the best node by analyzing the soft node affinity
// of that pod (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
func GetBestNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, nodes []*v1.Node) int32 {
var bestWeight int32 = 0
for _, node := range nodes {
weight := GetNodeWeightGivenPodPreferredAffinity(pod, node)
if weight > bestWeight {
bestWeight = weight
}
}
return bestWeight
}
// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) bool {
matches, err := utils.PodMatchNodeSelector(pod, node)
if err != nil {
return false
}
return matches
}
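A minimal sketch of how the two affinity-weight helpers above might be combined, shown unqualified as if from within this package; pod, currentNode, and candidateNodes are assumed variables (*v1.Pod, *v1.Node, []*v1.Node) and are not part of this file.
currentWeight := GetNodeWeightGivenPodPreferredAffinity(pod, currentNode)
bestWeight := GetBestNodeWeightGivenPodPreferredAffinity(pod, candidateNodes)
if bestWeight > currentWeight {
	// at least one candidate node satisfies more weighted preferred terms than the current node
}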

View File

@@ -811,6 +811,78 @@ func TestNodeFit(t *testing.T) {
}
}
func TestBestPodNodeAffinityWeight(t *testing.T) {
defaultPod := test.BuildTestPod("p1", 0, 0, "node1", func(p *v1.Pod) {
p.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "key1",
Operator: "In",
Values: []string{"value1"},
},
},
},
},
},
},
}
})
tests := []struct {
description string
pod *v1.Pod
nodes []*v1.Node
expectedWeight int32
}{
{
description: "No node matches the preferred affinity",
pod: defaultPod,
nodes: []*v1.Node{
test.BuildTestNode("node2", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"key2": "value2",
}
}),
test.BuildTestNode("node3", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"key3": "value3",
}
}),
},
expectedWeight: 0,
},
{
description: "A single node matches the preferred affinity",
pod: defaultPod,
nodes: []*v1.Node{
test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"key1": "value1",
}
}),
test.BuildTestNode("node2", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"key2": "value2",
}
}),
},
expectedWeight: 10,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
bestWeight := GetBestNodeWeightGivenPodPreferredAffinity(tc.pod, tc.nodes)
if bestWeight != tc.expectedWeight {
t.Errorf("Test %#v failed", tc.description)
}
})
}
}
// createResourceList builds a small resource list of core resources
func createResourceList(cpu, memory, ephemeralStorage int64) v1.ResourceList {
resourceList := make(map[v1.ResourceName]resource.Quantity)

View File

@@ -53,8 +53,8 @@ func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
type Options struct {
filter FilterFunc
includedNamespaces sets.String
excludedNamespaces sets.String
includedNamespaces sets.Set[string]
excludedNamespaces sets.Set[string]
labelSelector *metav1.LabelSelector
}
@@ -71,13 +71,13 @@ func (o *Options) WithFilter(filter FilterFunc) *Options {
}
// WithNamespaces sets included namespaces
func (o *Options) WithNamespaces(namespaces sets.String) *Options {
func (o *Options) WithNamespaces(namespaces sets.Set[string]) *Options {
o.includedNamespaces = namespaces
return o
}
// WithoutNamespaces sets excluded namespaces
func (o *Options) WithoutNamespaces(namespaces sets.String) *Options {
func (o *Options) WithoutNamespaces(namespaces sets.Set[string]) *Options {
o.excludedNamespaces = namespaces
return o
}
@@ -157,6 +157,20 @@ func BuildGetPodsAssignedToNodeFunc(podInformer cache.SharedIndexInformer) (GetP
return getPodsAssignedToNode, nil
}
// ListPodsOnNodes returns all pods on given nodes.
func ListPodsOnNodes(nodes []*v1.Node, getPodsAssignedToNode GetPodsAssignedToNodeFunc, filter FilterFunc) ([]*v1.Pod, error) {
pods := make([]*v1.Pod, 0)
for _, node := range nodes {
podsOnNode, err := ListPodsOnANode(node.Name, getPodsAssignedToNode, filter)
if err != nil {
return nil, err
}
pods = append(pods, podsOnNode...)
}
return pods, nil
}
// ListPodsOnANode lists all pods on a node.
// It also accepts a "filter" function which can be used to further limit the pods that are returned.
// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
@@ -187,6 +201,15 @@ func ListAllPodsOnANode(
return pods, nil
}
func GroupByNamespace(pods []*v1.Pod) map[string][]*v1.Pod {
m := make(map[string][]*v1.Pod)
for i := 0; i < len(pods); i++ {
pod := pods[i]
m[pod.Namespace] = append(m[pod.Namespace], pod)
}
return m
}
// OwnerRef returns the ownerRefList for the pod.
func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
return pod.ObjectMeta.GetOwnerReferences()
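A short, assumed caller-side sketch of the generic set API adopted here together with the new helpers; the variables nodes and getPodsAssignedToNode, the enclosing error-returning function, and the use of the builder's BuildFilterFunc are assumptions for illustration. sets.New infers the element type, replacing sets.NewString, and the resulting sets.Set[string] is what WithNamespaces/WithoutNamespaces now accept.
included := sets.New("kube-system") // sets.Set[string]; previously built with sets.NewString("kube-system")
filter, err := podutil.NewOptions().WithNamespaces(included).BuildFilterFunc()
if err != nil {
	return err
}
pods, err := podutil.ListPodsOnNodes(nodes, getPodsAssignedToNode, filter)
if err != nil {
	return err
}
byNamespace := podutil.GroupByNamespace(pods) // map[string][]*v1.Pod keyed by pod namespace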

View File

@@ -21,6 +21,8 @@ import (
"fmt"
"os"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/runtime"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@@ -133,29 +135,22 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
}
func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginregistry.Registry) error {
var errorsInProfiles error
var errorsInProfiles []error
for _, profile := range in.Profiles {
for _, pluginConfig := range profile.PluginConfigs {
if _, ok := registry[pluginConfig.Name]; ok {
if _, ok := registry[pluginConfig.Name]; !ok {
errorsInProfiles = fmt.Errorf("%w: %s", errorsInProfiles, fmt.Sprintf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
continue
}
if _, ok := registry[pluginConfig.Name]; !ok {
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
continue
}
pluginUtilities := registry[pluginConfig.Name]
if pluginUtilities.PluginArgValidator == nil {
continue
}
err := pluginUtilities.PluginArgValidator(pluginConfig.Args)
if err != nil {
if errorsInProfiles == nil {
errorsInProfiles = fmt.Errorf("in profile %s: %s", profile.Name, err.Error())
} else {
errorsInProfiles = fmt.Errorf("%w: %s", errorsInProfiles, fmt.Sprintf("in profile %s: %s", profile.Name, err.Error()))
}
}
pluginUtilities := registry[pluginConfig.Name]
if pluginUtilities.PluginArgValidator == nil {
continue
}
if err := pluginUtilities.PluginArgValidator(pluginConfig.Args); err != nil {
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
}
}
}
return errorsInProfiles
return utilerrors.NewAggregate(errorsInProfiles)
}
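For reference, a tiny sketch (made-up messages) of how the aggregate reads: utilerrors.NewAggregate returns nil for an empty slice and otherwise joins the messages in a bracketed, comma-separated list, which is why the expected error strings in the tests below change shape.
errs := []error{
	fmt.Errorf("in profile A: plugin Foo in pluginConfig not registered"),
	fmt.Errorf("in profile B: only one of Include/Exclude namespaces can be set"),
}
fmt.Println(utilerrors.NewAggregate(errs))
// prints: [in profile A: plugin Foo in pluginConfig not registered, in profile B: only one of Include/Exclude namespaces can be set]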

View File

@@ -21,6 +21,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
fakeclientset "k8s.io/client-go/kubernetes/fake"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
@@ -399,7 +400,10 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
defaultEvictorPluginConfig,
{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{},
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
},
Plugins: api.Plugins{
@@ -714,7 +718,8 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
IncludeSoftConstraints: true,
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
},
@@ -857,6 +862,79 @@ strategies:
},
},
},
{
description: "v1alpha1 to internal with priorityThreshold",
policy: []byte(`apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 5
namespaces:
include:
- "testleaderelection-a"
thresholdPriority: null
thresholdPriorityClassName: prioritym
`),
result: &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: fmt.Sprintf("strategy-%s-profile", podlifetime.PluginName),
PluginConfigs: []api.PluginConfig{
{
Name: "DefaultEvictor",
Args: &defaultevictor.DefaultEvictorArgs{
PriorityThreshold: &api.PriorityThreshold{
Value: utilpointer.Int32(0),
},
},
},
{
Name: podlifetime.PluginName,
Args: &podlifetime.PodLifeTimeArgs{
Namespaces: &api.Namespaces{
Include: []string{"testleaderelection-a"},
},
MaxPodLifeTimeSeconds: utilpointer.Uint(5),
},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
Deschedule: api.PluginSet{
Enabled: []string{podlifetime.PluginName},
},
},
},
},
},
},
{
description: "v1alpha1 to internal with priorityThreshold value and name should return error",
policy: []byte(`apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 5
namespaces:
include:
- "testleaderelection-a"
thresholdPriority: 222
thresholdPriorityClassName: prioritym
`),
result: nil,
err: fmt.Errorf("failed decoding descheduler's policy config \"filename\": priority threshold misconfigured for plugin PodLifeTime"),
},
{
description: "v1alpha2 to internal",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
@@ -925,7 +1003,7 @@ profiles:
if err != nil {
if tc.err == nil {
t.Errorf("unexpected error: %s.", err.Error())
} else {
} else if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
}
}
@@ -985,7 +1063,7 @@ func TestValidateDeschedulerConfiguration(t *testing.T) {
},
},
},
result: fmt.Errorf("in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set: in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set"),
result: fmt.Errorf("[in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set, in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set]"),
},
}

View File

@@ -22,7 +22,7 @@ import (
func ValidateDefaultEvictorArgs(obj runtime.Object) error {
args := obj.(*DefaultEvictorArgs)
if args.PriorityThreshold != nil && len(args.PriorityThreshold.Name) > 0 {
if args.PriorityThreshold != nil && args.PriorityThreshold.Value != nil && len(args.PriorityThreshold.Name) > 0 {
return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set, got %v", args)
}

View File

@@ -282,9 +282,9 @@ func evictPods(
podEvictor frameworktypes.Evictor,
continueEviction continueEvictionCond,
) {
var excludedNamespaces sets.String
var excludedNamespaces sets.Set[string]
if evictableNamespaces != nil {
excludedNamespaces = sets.NewString(evictableNamespaces.Exclude...)
excludedNamespaces = sets.New(evictableNamespaces.Exclude...)
}
if continueEviction(nodeInfo, totalAvailableUsage) {

View File

@@ -49,10 +49,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type PodLifeTimeArgs, got %T", args)
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if podLifeTimeArgs.Namespaces != nil {
includedNamespaces = sets.NewString(podLifeTimeArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(podLifeTimeArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(podLifeTimeArgs.Namespaces.Include...)
excludedNamespaces = sets.New(podLifeTimeArgs.Namespaces.Exclude...)
}
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -72,7 +72,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
})
if len(podLifeTimeArgs.States) > 0 {
states := sets.NewString(podLifeTimeArgs.States...)
states := sets.New(podLifeTimeArgs.States...)
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
if states.Has(string(pod.Status.Phase)) {
return true
@@ -106,7 +106,7 @@ func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framewo
nodeMap := make(map[string]*v1.Node, len(nodes))
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods

View File

@@ -43,7 +43,7 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
}
}
podLifeTimeAllowedStates := sets.NewString(
podLifeTimeAllowedStates := sets.New(
string(v1.PodRunning),
string(v1.PodPending),
@@ -53,7 +53,7 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
)
if !podLifeTimeAllowedStates.HasAll(args.States...) {
return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.List())
return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.UnsortedList())
}
return nil
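A side note on the generic set type used above, as a brief assumed sketch: sets.Set[string] exposes UnsortedList() as a method, while a sorted slice comes from the package-level sets.List helper, so messages built from UnsortedList are not guaranteed to be ordered.
states := sets.New(string(v1.PodRunning), string(v1.PodPending))
_ = states.UnsortedList() // order not guaranteed
_ = sets.List(states)     // package-level helper returning a sorted slice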

View File

@@ -64,10 +64,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemoveDuplicatesArgs, got %T", args)
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if removeDuplicatesArgs.Namespaces != nil {
includedNamespaces = sets.NewString(removeDuplicatesArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(removeDuplicatesArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(removeDuplicatesArgs.Namespaces.Include...)
excludedNamespaces = sets.New(removeDuplicatesArgs.Namespaces.Exclude...)
}
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -100,7 +100,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
nodeMap := make(map[string]*v1.Node)
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(node.Name, r.handle.GetPodsAssignedToNodeFunc(), r.podFilter)
if err != nil {
klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
@@ -279,7 +279,7 @@ func hasExcludedOwnerRefKind(ownerRefs []metav1.OwnerReference, excludeOwnerKind
return false
}
exclude := sets.NewString(excludeOwnerKinds...)
exclude := sets.New(excludeOwnerKinds...)
for _, owner := range ownerRefs {
if exclude.Has(owner.Kind) {
return true

View File

@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemoveFailedPodsArgs, got %T", args)
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if failedPodsArgs.Namespaces != nil {
includedNamespaces = sets.NewString(failedPodsArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(failedPodsArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(failedPodsArgs.Namespaces.Include...)
excludedNamespaces = sets.New(failedPodsArgs.Namespaces.Exclude...)
}
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -93,7 +93,7 @@ func (d *RemoveFailedPods) Name() string {
// Deschedule extension point implementation for the plugin
func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -126,7 +126,7 @@ func validateCanEvict(pod *v1.Pod, failedPodArgs *RemoveFailedPodsArgs) error {
if len(failedPodArgs.ExcludeOwnerKinds) > 0 {
ownerRefList := podutil.OwnerRef(pod)
for _, owner := range ownerRefList {
if sets.NewString(failedPodArgs.ExcludeOwnerKinds...).Has(owner.Kind) {
if sets.New(failedPodArgs.ExcludeOwnerKinds...).Has(owner.Kind) {
errs = append(errs, fmt.Errorf("pod's owner kind of %s is excluded", owner.Kind))
}
}
@@ -143,7 +143,7 @@ func validateCanEvict(pod *v1.Pod, failedPodArgs *RemoveFailedPodsArgs) error {
reasons = append(reasons, getFailedContainerStatusReasons(pod.Status.InitContainerStatuses)...)
}
if !sets.NewString(failedPodArgs.Reasons...).HasAny(reasons...) {
if !sets.New(failedPodArgs.Reasons...).HasAny(reasons...) {
errs = append(errs, fmt.Errorf("pod does not match any of the reasons"))
}
}

View File

@@ -37,4 +37,7 @@ func SetDefaults_RemovePodsHavingTooManyRestartsArgs(obj runtime.Object) {
if !args.IncludingInitContainers {
args.IncludingInitContainers = false
}
if args.States == nil {
args.States = nil
}
}

View File

@@ -17,6 +17,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -37,6 +38,7 @@ func TestSetDefaults_RemovePodsHavingTooManyRestartsArgs(t *testing.T) {
LabelSelector: nil,
PodRestartThreshold: 0,
IncludingInitContainers: false,
States: nil,
},
},
{
@@ -46,12 +48,14 @@ func TestSetDefaults_RemovePodsHavingTooManyRestartsArgs(t *testing.T) {
LabelSelector: &metav1.LabelSelector{},
PodRestartThreshold: 10,
IncludingInitContainers: true,
States: []string{string(v1.PodRunning)},
},
want: &RemovePodsHavingTooManyRestartsArgs{
Namespaces: &api.Namespaces{},
LabelSelector: &metav1.LabelSelector{},
PodRestartThreshold: 10,
IncludingInitContainers: true,
States: []string{string(v1.PodRunning)},
},
},
}

View File

@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if tooManyRestartsArgs.Namespaces != nil {
includedNamespaces = sets.NewString(tooManyRestartsArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(tooManyRestartsArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(tooManyRestartsArgs.Namespaces.Include...)
excludedNamespaces = sets.New(tooManyRestartsArgs.Namespaces.Exclude...)
}
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -75,6 +75,23 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return true
})
if len(tooManyRestartsArgs.States) > 0 {
states := sets.New(tooManyRestartsArgs.States...)
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
if states.Has(string(pod.Status.Phase)) {
return true
}
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
return true
}
}
return false
})
}
return &RemovePodsHavingTooManyRestarts{
handle: handle,
args: tooManyRestartsArgs,
@@ -90,7 +107,7 @@ func (d *RemovePodsHavingTooManyRestarts) Name() string {
// Deschedule extension point implementation for the plugin
func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods

View File

@@ -104,8 +104,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
node4 := test.BuildTestNode("node4", 200, 3000, 10, nil)
node5 := test.BuildTestNode("node5", 2000, 3000, 10, nil)
pods := append(append(initPods(node1), test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, nil)), test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, nil))
createRemovePodsHavingTooManyRestartsAgrs := func(
podRestartThresholds int32,
includingInitContainers bool,
@@ -126,6 +124,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
nodeFit bool
applyFunc func([]*v1.Pod)
}{
{
description: "All pods have total restarts under threshold, no pod evictions",
@@ -191,7 +190,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
maxNoOfPodsToEvictPerNamespace: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tained, 0 pod evictions",
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
@@ -222,10 +221,68 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
nodes: []*v1.Node{node1, node5},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
if len(pod.Status.ContainerStatuses) > 0 {
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
}
}
}
},
},
{
description: "pods without CrashLoopBackOff with states=CrashLoopBackOff, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
nodes: []*v1.Node{node1, node5},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods running with state=Running, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.Phase = v1.PodRunning
}
},
},
{
description: "pods pending with state=Running, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.Phase = v1.PodPending
}
},
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
pods := append(
initPods(node1),
test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, nil),
test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, nil),
)
if tc.applyFunc != nil {
tc.applyFunc(pods)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

View File

@@ -29,4 +29,5 @@ type RemovePodsHavingTooManyRestartsArgs struct {
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
PodRestartThreshold int32 `json:"podRestartThreshold"`
IncludingInitContainers bool `json:"includingInitContainers"`
States []string `json:"states"`
}
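A hedged example of populating the new field (values picked for illustration; per the validation added later in this change, only the Running pod phase and the CrashLoopBackOff waiting reason are accepted as states):
args := &RemovePodsHavingTooManyRestartsArgs{
	PodRestartThreshold:     5,
	IncludingInitContainers: true,
	States:                  []string{"CrashLoopBackOff"}, // only consider crash-looping pods
}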

View File

@@ -16,8 +16,10 @@ package removepodshavingtoomanyrestarts
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
)
// ValidateRemovePodsHavingTooManyRestartsArgs validates RemovePodsHavingTooManyRestarts arguments
@@ -38,5 +40,17 @@ func ValidateRemovePodsHavingTooManyRestartsArgs(obj runtime.Object) error {
return fmt.Errorf("invalid PodsHavingTooManyRestarts threshold")
}
allowedStates := sets.New(
// Pod phases:
string(v1.PodRunning),
// Container state reasons:
"CrashLoopBackOff",
)
if !allowedStates.HasAll(args.States...) {
return fmt.Errorf("states must be one of %v", allowedStates.UnsortedList())
}
return nil
}

View File

@@ -0,0 +1,74 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package removepodshavingtoomanyrestarts
import (
"testing"
v1 "k8s.io/api/core/v1"
)
func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
testCases := []struct {
description string
args *RemovePodsHavingTooManyRestartsArgs
expectError bool
}{
{
description: "valid arg, no errors",
args: &RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 1,
States: []string{string(v1.PodRunning)},
},
expectError: false,
},
{
description: "invalid PodRestartThreshold arg, expects errors",
args: &RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 0,
},
expectError: true,
},
{
description: "invalid States arg, expects errors",
args: &RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 1,
States: []string{string(v1.PodFailed)},
},
expectError: true,
},
{
description: "allows CrashLoopBackOff state",
args: &RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 1,
States: []string{"CrashLoopBackOff"},
},
expectError: false,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemovePodsHavingTooManyRestartsArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
}
})
}
}

View File

@@ -41,6 +41,11 @@ func (in *RemovePodsHavingTooManyRestartsArgs) DeepCopyInto(out *RemovePodsHavin
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.States != nil {
in, out := &in.States, &out.States
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingInterPodAntiAffinityArgs, got %T", args)
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if interPodAntiAffinityArgs.Namespaces != nil {
includedNamespaces = sets.NewString(interPodAntiAffinityArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(interPodAntiAffinityArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(interPodAntiAffinityArgs.Namespaces.Include...)
excludedNamespaces = sets.New(interPodAntiAffinityArgs.Namespaces.Exclude...)
}
podFilter, err := podutil.NewOptions().
@@ -78,24 +78,31 @@ func (d *RemovePodsViolatingInterPodAntiAffinity) Name() string {
}
func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
pods, err := podutil.ListPodsOnNodes(nodes, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error listing all pods: %v", err),
}
}
podsInANamespace := podutil.GroupByNamespace(pods)
podsOnANode := groupByNodeName(pods)
nodeMap := createNodeMap(nodes)
loop:
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error listing pods: %v", err),
}
}
// sort the evictable Pods based on priority, if there are multiple pods with same priority, they are sorted based on QoS tiers.
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
pods := podsOnANode[node.Name]
// sort the evictable Pods based on priority; if there are multiple pods with the same priority, they are sorted based on QoS tiers.
podutil.SortPodsBasedOnPriorityLowToHigh(pods)
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if checkPodsWithAntiAffinityExist(pods[i], pods) && d.handle.Evictor().Filter(pods[i]) && d.handle.Evictor().PreEvictionFilter(pods[i]) {
if checkPodsWithAntiAffinityExist(pods[i], podsInANamespace, nodeMap) && d.handle.Evictor().Filter(pods[i]) && d.handle.Evictor().PreEvictionFilter(pods[i]) {
if d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{}) {
// Since the current pod is evicted all other pods which have anti-affinity with this
// pod need not be evicted.
// Update pods.
// Update the namespace map and the node-local pod slice.
podsInANamespace = removePodFromNamespaceMap(pods[i], podsInANamespace)
pods = append(pods[:i], pods[i+1:]...)
i--
totalPods--
@@ -109,8 +116,40 @@ loop:
return nil
}
func removePodFromNamespaceMap(podToRemove *v1.Pod, podMap map[string][]*v1.Pod) map[string][]*v1.Pod {
podList, ok := podMap[podToRemove.Namespace]
if !ok {
return podMap
}
for i := 0; i < len(podList); i++ {
podToCheck := podList[i]
if podToRemove.Name == podToCheck.Name {
podMap[podToRemove.Namespace] = append(podList[:i], podList[i+1:]...)
return podMap
}
}
return podMap
}
func groupByNodeName(pods []*v1.Pod) map[string][]*v1.Pod {
m := make(map[string][]*v1.Pod)
for i := 0; i < len(pods); i++ {
pod := pods[i]
m[pod.Spec.NodeName] = append(m[pod.Spec.NodeName], pod)
}
return m
}
func createNodeMap(nodes []*v1.Node) map[string]*v1.Node {
m := make(map[string]*v1.Node, len(nodes))
for _, node := range nodes {
m[node.GetName()] = node
}
return m
}
// checkPodsWithAntiAffinityExist checks if there are other pods in the same topology domain (nodes sharing the term's topologyKey label value) that the current pod cannot tolerate.
func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods map[string][]*v1.Pod, nodeMap map[string]*v1.Node) bool {
affinity := pod.Spec.Affinity
if affinity != nil && affinity.PodAntiAffinity != nil {
for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) {
@@ -120,9 +159,22 @@ func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
klog.ErrorS(err, "Unable to convert LabelSelector into Selector")
return false
}
for _, existingPod := range pods {
if existingPod.Name != pod.Name && utils.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
return true
for namespace := range namespaces {
for _, existingPod := range pods[namespace] {
if existingPod.Name != pod.Name && utils.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
node, ok := nodeMap[pod.Spec.NodeName]
if !ok {
continue
}
nodeHavingExistingPod, ok := nodeMap[existingPod.Spec.NodeName]
if !ok {
continue
}
if hasSameLabelValue(node, nodeHavingExistingPod, term.TopologyKey) {
klog.V(1).InfoS("Found Pods violating PodAntiAffinity", "pod to evicted", klog.KObj(pod))
return true
}
}
}
}
}
@@ -130,6 +182,29 @@ func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
return false
}
func hasSameLabelValue(node1, node2 *v1.Node, key string) bool {
if node1.Name == node2.Name {
return true
}
node1Labels := node1.Labels
if node1Labels == nil {
return false
}
node2Labels := node2.Labels
if node2Labels == nil {
return false
}
value1, ok := node1Labels[key]
if !ok {
return false
}
value2, ok := node2Labels[key]
if !ok {
return false
}
return value1 == value2
}
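A minimal usage sketch of the helper above, assuming it lives in this plugin's test file next to the fixtures shown in the next diff (test.BuildTestNode, node names n1/n5, and the region label are taken from those fixtures); it is not part of this change set, only an illustration of how topologyKey grouping behaves.
func TestHasSameLabelValueDomains(t *testing.T) {
	// Two nodes sharing region=main-region form a single anti-affinity domain.
	nodeA := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
		node.ObjectMeta.Labels = map[string]string{"region": "main-region"}
	})
	nodeB := test.BuildTestNode("n5", 200, 3000, 10, func(node *v1.Node) {
		node.ObjectMeta.Labels = map[string]string{"region": "main-region"}
	})
	if !hasSameLabelValue(nodeA, nodeB, "region") {
		t.Error("nodes sharing region=main-region should count as the same topology domain")
	}
	// A topologyKey absent from both nodes never groups them together.
	if hasSameLabelValue(nodeA, nodeB, "zone") {
		t.Error("nodes without the zone label should not count as the same topology domain")
	}
}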
// getPodAntiAffinityTerms gets the antiaffinity terms for the given pod.
func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
if podAntiAffinity != nil {

View File

@@ -38,19 +38,27 @@ import (
)
func TestPodAntiAffinity(t *testing.T) {
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node1 := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
})
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
})
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node4 := test.BuildTestNode("n4", 2, 2, 1, nil)
node5 := test.BuildTestNode("n5", 200, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
})
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
@@ -62,6 +70,7 @@ func TestPodAntiAffinity(t *testing.T) {
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
p11 := test.BuildTestPod("p11", 100, 0, node5.Name, nil)
p9.DeletionTimestamp = &metav1.Time{}
p10.DeletionTimestamp = &metav1.Time{}
@@ -73,6 +82,7 @@ func TestPodAntiAffinity(t *testing.T) {
p5.Labels = map[string]string{"foo": "bar"}
p6.Labels = map[string]string{"foo": "bar"}
p7.Labels = map[string]string{"foo1": "bar1"}
p11.Labels = map[string]string{"foo": "bar"}
nonEvictablePod.Labels = map[string]string{"foo": "bar"}
test.SetNormalOwnerRef(p1)
test.SetNormalOwnerRef(p2)
@@ -83,6 +93,7 @@ func TestPodAntiAffinity(t *testing.T) {
test.SetNormalOwnerRef(p7)
test.SetNormalOwnerRef(p9)
test.SetNormalOwnerRef(p10)
test.SetNormalOwnerRef(p11)
// set pod anti affinity
setPodAntiAffinity(p1, "foo", "bar")
@@ -186,6 +197,13 @@ func TestPodAntiAffinity(t *testing.T) {
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description:             "Evict pod violating anti-affinity across different nodes (all pods have anti-affinity)",
pods: []*v1.Pod{p1, p11},
nodes: []*v1.Node{node1, node5},
expectedEvictedPodCount: 1,
nodeFit: false,
},
}
for _, test := range tests {

View File

@@ -26,6 +26,7 @@ import (
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils"
)
const PluginName = "RemovePodsViolatingNodeAffinity"
@@ -46,10 +47,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeAffinityArgs, got %T", args)
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if nodeAffinityArgs.Namespaces != nil {
includedNamespaces = sets.NewString(nodeAffinityArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(nodeAffinityArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(nodeAffinityArgs.Namespaces.Include...)
excludedNamespaces = sets.New(nodeAffinityArgs.Namespaces.Exclude...)
}
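For readers unfamiliar with the sets change that runs through this series: sets.String and sets.NewString are replaced by the generic sets.Set[string] and sets.New from k8s.io/apimachinery/pkg/util/sets. A standalone sketch, with placeholder namespace names, of the generic API in use:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// sets.New infers the element type from its arguments.
	included := sets.New("ns1", "ns2") // sets.Set[string]
	// An empty set needs an explicit type parameter.
	excluded := sets.New[string]()

	fmt.Println(included.Has("ns1"))           // true
	fmt.Println(excluded.Has("kube-system"))   // false
	fmt.Println(included.Difference(excluded)) // still contains ns1 and ns2
}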
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -78,40 +79,68 @@ func (d *RemovePodsViolatingNodeAffinity) Name() string {
func (d *RemovePodsViolatingNodeAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
for _, nodeAffinity := range d.args.NodeAffinityType {
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
var err *frameworktypes.Status = nil
// The pods that we'll evict must be evictable. For example, the current number of replicas
// must be greater than the pdb.minValue.
// The pods must be able to get scheduled on a different node. Otherwise, it doesn't make much
// sense to evict them.
switch nodeAffinity {
case "requiredDuringSchedulingIgnoredDuringExecution":
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(
node.Name,
d.handle.GetPodsAssignedToNodeFunc(),
podutil.WrapFilterFuncs(d.podFilter, func(pod *v1.Pod) bool {
return d.handle.Evictor().Filter(pod) &&
!nodeutil.PodFitsCurrentNode(d.handle.GetPodsAssignedToNodeFunc(), pod, node) &&
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes)
}),
)
if err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error listing pods on a node: %v", err),
}
}
for _, pod := range pods {
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
if d.handle.Evictor().NodeLimitExceeded(node) {
break
}
}
}
// In this specific case, the pod must also violate the nodeSelector to be evicted
filterFunc := func(pod *v1.Pod, node *v1.Node, nodes []*v1.Node) bool {
return utils.PodHasNodeAffinity(pod, utils.RequiredDuringSchedulingIgnoredDuringExecution) &&
d.handle.Evictor().Filter(pod) &&
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) &&
!nodeutil.PodMatchNodeSelector(pod, node)
}
err = d.processNodes(ctx, nodes, filterFunc)
case "preferredDuringSchedulingIgnoredDuringExecution":
// In this specific case, the pod must have a better fit on another node than
// in the current one based on the preferred node affinity
filterFunc := func(pod *v1.Pod, node *v1.Node, nodes []*v1.Node) bool {
return utils.PodHasNodeAffinity(pod, utils.PreferredDuringSchedulingIgnoredDuringExecution) &&
d.handle.Evictor().Filter(pod) &&
nodeutil.PodFitsAnyNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) &&
(nodeutil.GetBestNodeWeightGivenPodPreferredAffinity(pod, nodes) > nodeutil.GetNodeWeightGivenPodPreferredAffinity(pod, node))
}
err = d.processNodes(ctx, nodes, filterFunc)
default:
klog.ErrorS(nil, "Invalid nodeAffinityType", "nodeAffinity", nodeAffinity)
}
if err != nil {
return err
}
}
return nil
}
func (d *RemovePodsViolatingNodeAffinity) processNodes(ctx context.Context, nodes []*v1.Node, filterFunc func(*v1.Pod, *v1.Node, []*v1.Node) bool) *frameworktypes.Status {
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
// Potentially evictable pods
pods, err := podutil.ListPodsOnANode(
node.Name,
d.handle.GetPodsAssignedToNodeFunc(),
podutil.WrapFilterFuncs(d.podFilter, func(pod *v1.Pod) bool {
return filterFunc(pod, node, nodes)
}),
)
if err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error listing pods on a node: %v", err),
}
}
for _, pod := range pods {
klog.V(1).InfoS("Evicting pod", "pod", klog.KObj(pod))
d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
if d.handle.Evictor().NodeLimitExceeded(node) {
break
}
}
}
return nil
}
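A hedged configuration sketch (not part of this diff) showing how the new preferred mode is switched on; it mirrors the argument values used by the tests in the next file and would sit in this plugin package:
// Hypothetical helper in this package, for illustration only.
func exampleNodeAffinityArgs() *RemovePodsViolatingNodeAffinityArgs {
	// With the preferred type listed, a pod is considered for eviction only when
	// another node scores strictly higher for its preferred node affinity terms
	// (GetBestNodeWeightGivenPodPreferredAffinity vs GetNodeWeightGivenPodPreferredAffinity)
	// and it still passes the evictor Filter and PodFitsAnyNode checks above.
	return &RemovePodsViolatingNodeAffinityArgs{
		NodeAffinityType: []string{
			"requiredDuringSchedulingIgnoredDuringExecution",
			"preferredDuringSchedulingIgnoredDuringExecution",
		},
	}
}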

View File

@@ -48,26 +48,49 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
unschedulableNodeWithLabels.Spec.Unschedulable = true
addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time) []*v1.Pod {
addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time, affinityType string) []*v1.Pod {
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
NodeAffinity: &v1.NodeAffinity{},
}
switch affinityType {
case "requiredDuringSchedulingIgnoredDuringExecution":
podWithNodeAffinity.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
}
case "preferredDuringSchedulingIgnoredDuringExecution":
podWithNodeAffinity.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{
{
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
}
case "requiredDuringSchedulingRequiredDuringExecution":
default:
t.Fatalf("Invalid affinity type %s", affinityType)
}
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
@@ -77,6 +100,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
podWithNodeAffinity.DeletionTimestamp = deletionTimestamp
pod1.DeletionTimestamp = deletionTimestamp
pod2.DeletionTimestamp = deletionTimestamp
@@ -87,6 +111,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
}
}
var uint0 uint = 0
var uint1 uint = 1
tests := []struct {
description string
@@ -104,16 +129,25 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
NodeAffinityType: []string{"requiredDuringSchedulingRequiredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingRequiredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
},
{
description: "Pod is correctly scheduled on node, no eviction expected",
description: "Pod is correctly scheduled on node, no eviction expected [required affinity]",
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil),
pods: addPodsToNode(nodeWithLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels},
},
{
description: "Pod is correctly scheduled on node, no eviction expected [preferred affinity]",
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels},
},
{
@@ -122,66 +156,176 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
description: "Pod is scheduled on node without matching labels, another schedulable node available with better fit, should be evicted",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted [required affinity]",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminting",
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted [preferred affinity]",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should not be evicted",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
description:             "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 0, should not be evicted [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint0,
},
{
description:             "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 0, should not be evicted [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminating [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminating [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should be evicted [required affinity]",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should be evicted [preferred affinity]",
expectedEvictedPodCount: 1,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 0, should not be evicted [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 0, should not be evicted [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint0,
},
{
description:             "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminating [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description:             "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminating [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
nodefit: true,
},
{
description: "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict",
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
nodefit: true,
},
{
description: "Pod is scheduled on node without matching labels, and unschedulable node where pod could fit is available, should not evict [required affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: &uint1,
nodefit: true,
},
{
description: "Pod is scheduled on node without matching labels, and unschedulable node where pod could fit is available, should not evict [preferred affinity]",
expectedEvictedPodCount: 0,
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: &uint1,
nodefit: true,

View File

@@ -50,10 +50,10 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeTaintsArgs, got %T", args)
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if nodeTaintsArgs.Namespaces != nil {
includedNamespaces = sets.NewString(nodeTaintsArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(nodeTaintsArgs.Namespaces.Exclude...)
includedNamespaces = sets.New(nodeTaintsArgs.Namespaces.Include...)
excludedNamespaces = sets.New(nodeTaintsArgs.Namespaces.Exclude...)
}
// We can combine Filter and PreEvictionFilter since for this strategy it does not matter where we run PreEvictionFilter
@@ -67,7 +67,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
}
excludedTaints := sets.NewString(nodeTaintsArgs.ExcludedTaints...)
excludedTaints := sets.New(nodeTaintsArgs.ExcludedTaints...)
excludeTaint := func(taint *v1.Taint) bool {
// Exclude taints by key *or* key=value
return excludedTaints.Has(taint.Key) || (taint.Value != "" && excludedTaints.Has(fmt.Sprintf("%s=%s", taint.Key, taint.Value)))

View File

@@ -14,7 +14,9 @@ limitations under the License.
package removepodsviolatingtopologyspreadconstraint
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilpointer "k8s.io/utils/pointer"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -31,7 +33,10 @@ func SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(obj runtime.Obj
if args.LabelSelector == nil {
args.LabelSelector = nil
}
if !args.IncludeSoftConstraints {
args.IncludeSoftConstraints = false
if args.TopologyBalanceNodeFit == nil {
args.TopologyBalanceNodeFit = utilpointer.Bool(true)
}
if len(args.Constraints) == 0 {
args.Constraints = append(args.Constraints, v1.DoNotSchedule)
}
}
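A small sketch of what the reworked defaulting produces for empty args, consistent with the defaults test in the next file; it assumes this package plus the corev1 and utilpointer imports already used above:
// Hypothetical illustration of the defaulting behavior.
func exampleDefaultedArgs() *RemovePodsViolatingTopologySpreadConstraintArgs {
	args := &RemovePodsViolatingTopologySpreadConstraintArgs{}
	SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(args)
	// After defaulting:
	//   args.Constraints            == []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule}
	//   args.TopologyBalanceNodeFit == utilpointer.Bool(true)
	return args
}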

View File

@@ -17,12 +17,24 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
)
var scheme *runtime.Scheme
func init() {
scheme = runtime.NewScheme()
scheme.AddTypeDefaultingFunc(&RemovePodsViolatingTopologySpreadConstraintArgs{}, func(obj interface{}) {
SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(obj.(*RemovePodsViolatingTopologySpreadConstraintArgs))
})
utilruntime.Must(AddToScheme(scheme))
}
func TestSetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(t *testing.T) {
tests := []struct {
name string
@@ -35,26 +47,54 @@ func TestSetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(t *testing.
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: nil,
LabelSelector: nil,
IncludeSoftConstraints: false,
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
{
name: "RemovePodsViolatingTopologySpreadConstraintArgs with value",
in: &RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: &api.Namespaces{},
LabelSelector: &metav1.LabelSelector{},
IncludeSoftConstraints: true,
Namespaces: &api.Namespaces{},
LabelSelector: &metav1.LabelSelector{},
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
},
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: &api.Namespaces{},
LabelSelector: &metav1.LabelSelector{},
IncludeSoftConstraints: true,
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
{
name: "RemovePodsViolatingTopologySpreadConstraintArgs without TopologyBalanceNodeFit",
in: &RemovePodsViolatingTopologySpreadConstraintArgs{},
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
{
name: "RemovePodsViolatingTopologySpreadConstraintArgs with TopologyBalanceNodeFit=false",
in: &RemovePodsViolatingTopologySpreadConstraintArgs{
TopologyBalanceNodeFit: utilpointer.Bool(false),
},
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
TopologyBalanceNodeFit: utilpointer.Bool(false),
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
},
},
{
name: "RemovePodsViolatingTopologySpreadConstraintArgs with nil constraints",
in: &RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: nil,
},
want: &RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
}
for _, tc := range tests {
scheme := runtime.NewScheme()
utilruntime.Must(AddToScheme(scheme))
t.Run(tc.name, func(t *testing.T) {
scheme.Default(tc.in)
if diff := cmp.Diff(tc.in, tc.want); diff != "" {

View File

@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/node"
@@ -102,41 +103,40 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
// iterate through all topoPairs for this topologyKey and diff currentPods -minPods <=maxSkew
// if diff > maxSkew, add this pod in the current bucket for eviction
// First record all of the constraints by namespace
client := d.handle.ClientSet()
namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
klog.ErrorS(err, "Couldn't list namespaces")
return &frameworktypes.Status{
Err: fmt.Errorf("list namespace: %w", err),
}
}
klog.V(1).InfoS("Processing namespaces for topology spread constraints")
klog.V(1).Info("Processing namespaces for topology spread constraints")
podsForEviction := make(map[*v1.Pod]struct{})
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces sets.Set[string]
if d.args.Namespaces != nil {
includedNamespaces = sets.NewString(d.args.Namespaces.Include...)
excludedNamespaces = sets.NewString(d.args.Namespaces.Exclude...)
includedNamespaces = sets.New(d.args.Namespaces.Include...)
excludedNamespaces = sets.New(d.args.Namespaces.Exclude...)
}
// 1. for each namespace...
for _, namespace := range namespaces.Items {
if (len(includedNamespaces) > 0 && !includedNamespaces.Has(namespace.Name)) ||
(len(excludedNamespaces) > 0 && excludedNamespaces.Has(namespace.Name)) {
continue
pods, err := podutil.ListPodsOnNodes(nodes, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error listing all pods: %v", err),
}
namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
klog.ErrorS(err, "Couldn't list pods in namespace", "namespace", namespace)
}
allowedConstraints := sets.New[v1.UnsatisfiableConstraintAction](d.args.Constraints...)
namespacedPods := podutil.GroupByNamespace(pods)
// 1. for each namespace...
for namespace := range namespacedPods {
klog.V(4).InfoS("Processing namespace for topology spread constraints", "namespace", namespace)
if (len(includedNamespaces) > 0 && !includedNamespaces.Has(namespace)) ||
(len(excludedNamespaces) > 0 && excludedNamespaces.Has(namespace)) {
continue
}
// ...where there is a topology constraint
namespaceTopologySpreadConstraints := []v1.TopologySpreadConstraint{}
for _, pod := range namespacePods.Items {
for _, pod := range namespacedPods[namespace] {
for _, constraint := range pod.Spec.TopologySpreadConstraints {
// Ignore soft topology constraints if they are not included
if constraint.WhenUnsatisfiable == v1.ScheduleAnyway && (d.args == nil || !d.args.IncludeSoftConstraints) {
// Ignore topology constraints if they are not included
if !allowedConstraints.Has(constraint.WhenUnsatisfiable) {
continue
}
// Need to check v1.TopologySpreadConstraint deepEquality because
@@ -172,18 +172,18 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
// 3. for each evictable pod in that namespace
// (this loop is where we count the number of pods per topologyValue that match this constraint's selector)
var sumPods float64
for i := range namespacePods.Items {
for _, pod := range namespacedPods[namespace] {
// skip pods that are being deleted.
if utils.IsPodTerminating(&namespacePods.Items[i]) {
if utils.IsPodTerminating(pod) {
continue
}
// 4. if the pod matches this TopologySpreadConstraint LabelSelector
if !selector.Matches(labels.Set(namespacePods.Items[i].Labels)) {
if !selector.Matches(labels.Set(pod.Labels)) {
continue
}
// 5. If the pod's node matches this constraint's topologyKey, create a topoPair and add the pod
node, ok := nodeMap[namespacePods.Items[i].Spec.NodeName]
node, ok := nodeMap[pod.Spec.NodeName]
if !ok {
// If ok is false, the node is nil and node.Labels would panic; it means the pod has not been scheduled yet, so it's safe to just continue here.
continue
@@ -195,14 +195,14 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
// 6. create a topoPair with key as this TopologySpreadConstraint
topoPair := topologyPair{key: constraint.TopologyKey, value: nodeValue}
// 7. add the pod with key as this topoPair
constraintTopologies[topoPair] = append(constraintTopologies[topoPair], &namespacePods.Items[i])
constraintTopologies[topoPair] = append(constraintTopologies[topoPair], pod)
sumPods++
}
if topologyIsBalanced(constraintTopologies, constraint) {
klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
continue
}
balanceDomains(d.handle.GetPodsAssignedToNodeFunc(), podsForEviction, constraint, constraintTopologies, sumPods, d.handle.Evictor().Filter, nodes)
d.balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, nodes)
}
}
@@ -275,17 +275,18 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.Topol
// Following this, the above topology domains end up "sorted" as:
// [5, 5, 5, 5, 5, 5]
// (assuming even distribution by the scheduler of the evicted pods)
func balanceDomains(
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
podsForEviction map[*v1.Pod]struct{},
constraint v1.TopologySpreadConstraint,
constraintTopologies map[topologyPair][]*v1.Pod,
sumPods float64,
isEvictable func(pod *v1.Pod) bool,
nodes []*v1.Node,
) {
idealAvg := sumPods / float64(len(constraintTopologies))
isEvictable := d.handle.Evictor().Filter
sortedDomains := sortDomains(constraintTopologies, isEvictable)
getPodsAssignedToNode := d.handle.GetPodsAssignedToNodeFunc()
topologyBalanceNodeFit := utilpointer.BoolDeref(d.args.TopologyBalanceNodeFit, true)
nodesBelowIdealAvg := filterNodesBelowIdealAvg(nodes, sortedDomains, constraint.TopologyKey, idealAvg)
@@ -340,7 +341,7 @@ func balanceDomains(
// This is because the chosen pods aren't sorted, but immovable pods still count as "evicted" toward the PTS algorithm.
// So, a better selection heuristic could improve performance.
if !node.PodFitsAnyOtherNode(getPodsAssignedToNode, aboveToEvict[k], nodesBelowIdealAvg) {
if topologyBalanceNodeFit && !node.PodFitsAnyOtherNode(getPodsAssignedToNode, aboveToEvict[k], nodesBelowIdealAvg) {
klog.V(2).InfoS("ignoring pod for eviction as it does not fit on any other node", "pod", klog.KObj(aboveToEvict[k]))
continue
}
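A worked sketch of the idealAvg arithmetic driving balanceDomains, using the [8 7 0] domain sizes from a test case later in this diff (numbers are illustrative only):
// Hypothetical, standalone arithmetic illustration.
func exampleIdealAvg() float64 {
	domainSizes := []float64{8, 7, 0}
	var sumPods float64
	for _, size := range domainSizes {
		sumPods += size // 15 pods across all domains
	}
	idealAvg := sumPods / float64(len(domainSizes)) // 15 / 3 = 5
	// Domains above the average give up their excess: (8-5) + (7-5) = 5 evictions,
	// matching the "[8 7 0] ... should move 5 to get [5 5 5]" expectation below.
	return idealAvg
}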

View File

@@ -3,6 +3,7 @@ package removepodsviolatingtopologyspreadconstraint
import (
"context"
"fmt"
"sort"
"testing"
v1 "k8s.io/api/core/v1"
@@ -14,6 +15,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/events"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -98,6 +100,43 @@ func TestTopologySpreadConstraint(t *testing.T) {
namespaces: []string{"ns1"},
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
},
{
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2] (both constraints)",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 1,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "zone",
WhenUnsatisfiable: v1.ScheduleAnyway,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
{
count: 2,
node: "n1",
labels: map[string]string{"foo": "bar"},
},
{
count: 1,
node: "n2",
labels: map[string]string{"foo": "bar"},
},
}),
expectedEvictedCount: 1,
namespaces: []string{"ns1"},
args: RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
},
},
{
name: "2 domains, sizes [3,1], maxSkew=1, move 1 pod to achieve [2,2] (soft constraints)",
nodes: []*v1.Node{
@@ -131,7 +170,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
}),
expectedEvictedCount: 1,
namespaces: []string{"ns1"},
args: RemovePodsViolatingTopologySpreadConstraintArgs{IncludeSoftConstraints: true},
args: RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
},
},
{
name: "2 domains, sizes [3,1], maxSkew=1, no pods eligible, move 0 pods",
@@ -587,7 +628,9 @@ func TestTopologySpreadConstraint(t *testing.T) {
}),
expectedEvictedCount: 1,
namespaces: []string{"ns1"},
args: RemovePodsViolatingTopologySpreadConstraintArgs{IncludeSoftConstraints: true},
args: RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
},
},
{
name: "3 domains size [8 7 0], maxSkew=1, should move 5 to get [5 5 5]",
@@ -611,7 +654,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
}),
expectedEvictedCount: 5,
expectedEvictedPods: []string{"pod-5", "pod-6", "pod-7", "pod-8"},
expectedEvictedPods: []string{"pod-5", "pod-6", "pod-7", "pod-8", "pod-9"},
namespaces: []string{"ns1"},
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
},
@@ -923,6 +966,38 @@ func TestTopologySpreadConstraint(t *testing.T) {
args: RemovePodsViolatingTopologySpreadConstraintArgs{},
nodeFit: true,
},
{
name: "3 domains, sizes [2,3,4], maxSkew=1, args.NodeFit is false, and not enough cpu on zoneA; 1 should be moved to force scale-up",
nodes: []*v1.Node{
test.BuildTestNode("n1", 250, 2000, 9, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 1000, 2000, 9, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
test.BuildTestNode("n3", 1000, 2000, 9, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
},
pods: createTestPods([]testPodList{
{
count: 2,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 3,
node: "n2",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 4,
node: "n3",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
}),
expectedEvictedCount: 1,
namespaces: []string{"ns1"},
args: RemovePodsViolatingTopologySpreadConstraintArgs{TopologyBalanceNodeFit: utilpointer.Bool(false)},
nodeFit: true,
},
{
name: "3 domains, sizes [[1,0], [1,1], [2,1]], maxSkew=1, NodeFit is enabled, and not enough cpu on ZoneA; nothing should be moved",
nodes: []*v1.Node{
@@ -1137,11 +1212,21 @@ func TestTopologySpreadConstraint(t *testing.T) {
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
// workaround to ensure that pods are returned sorted so 'expectedEvictedPods' would work consistently
getPodsAssignedToNode := func(s string, filterFunc podutil.FilterFunc) ([]*v1.Pod, error) {
pods, err := podsAssignedToNode(s, filterFunc)
sort.Slice(pods, func(i, j int) bool {
return pods[i].Name < pods[j].Name
})
return pods, err
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
@@ -1200,6 +1285,8 @@ func TestTopologySpreadConstraint(t *testing.T) {
SharedInformerFactoryImpl: sharedInformerFactory,
}
SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(&tc.args)
plugin, err := New(
&tc.args,
handle,
@@ -1215,12 +1302,12 @@ func TestTopologySpreadConstraint(t *testing.T) {
}
if tc.expectedEvictedPods != nil {
diff := sets.NewString(tc.expectedEvictedPods...).Difference(sets.NewString(evictedPods...))
diff := sets.New(tc.expectedEvictedPods...).Difference(sets.New(evictedPods...))
if diff.Len() > 0 {
t.Errorf(
"Expected pods %v to be evicted but %v were not evicted. Actual pods evicted: %v",
tc.expectedEvictedPods,
diff.List(),
diff.UnsortedList(),
evictedPods,
)
}
@@ -1297,8 +1384,7 @@ func TestCheckIdenticalConstraints(t *testing.T) {
WhenUnsatisfiable: v1.DoNotSchedule,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
}
namespaceTopologySpreadConstraint := []v1.TopologySpreadConstraint{}
namespaceTopologySpreadConstraint = []v1.TopologySpreadConstraint{
namespaceTopologySpreadConstraint := []v1.TopologySpreadConstraint{
{
MaxSkew: 2,
TopologyKey: "zone",

View File

@@ -17,6 +17,7 @@ limitations under the License.
package removepodsviolatingtopologyspreadconstraint
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
)
@@ -28,7 +29,8 @@ import (
type RemovePodsViolatingTopologySpreadConstraintArgs struct {
metav1.TypeMeta `json:",inline"`
Namespaces *api.Namespaces `json:"namespaces"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
Namespaces *api.Namespaces `json:"namespaces"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
Constraints []v1.UnsatisfiableConstraintAction `json:"constraints"`
TopologyBalanceNodeFit *bool `json:"topologyBalanceNodeFit"`
}
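A hedged usage sketch of the reshaped args: the former IncludeSoftConstraints boolean becomes an explicit Constraints list, so opting into soft (ScheduleAnyway) constraints and disabling the balance node-fit check now looks like this (values are illustrative; assumes the usual corev1 and k8s.io/utils/pointer imports):
// Hypothetical configuration in this package, for illustration only.
func exampleTopologySpreadArgs() *RemovePodsViolatingTopologySpreadConstraintArgs {
	return &RemovePodsViolatingTopologySpreadConstraintArgs{
		Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
		TopologyBalanceNodeFit: utilpointer.Bool(false),
	}
}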

View File

@@ -19,23 +19,37 @@ package removepodsviolatingtopologyspreadconstraint
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
)
// ValidateRemovePodsViolatingTopologySpreadConstraintArgs validates RemovePodsViolatingTopologySpreadConstraint arguments
func ValidateRemovePodsViolatingTopologySpreadConstraintArgs(obj runtime.Object) error {
var errs []error
args := obj.(*RemovePodsViolatingTopologySpreadConstraintArgs)
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
errs = append(errs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
}
if args.LabelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
errs = append(errs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
}
}
return nil
if len(args.Constraints) > 0 {
supportedConstraints := sets.New(v1.DoNotSchedule, v1.ScheduleAnyway)
for _, constraint := range args.Constraints {
if !supportedConstraints.Has(constraint) {
errs = append(errs, fmt.Errorf("constraint %s is not one of %v", constraint, supportedConstraints))
}
}
}
return errors.NewAggregate(errs)
}

View File

@@ -0,0 +1,85 @@
package removepodsviolatingtopologyspreadconstraint
import (
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
)
func TestValidateRemovePodsViolatingTopologySpreadConstraintArgs(t *testing.T) {
testCases := []struct {
description string
args *RemovePodsViolatingTopologySpreadConstraintArgs
expectError bool
}{
{
description: "valid namespace args, no errors",
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: &api.Namespaces{
Include: []string{"default"},
},
},
expectError: false,
},
{
description: "invalid namespaces args, expects error",
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: &api.Namespaces{
Include: []string{"default"},
Exclude: []string{"kube-system"},
},
},
expectError: true,
},
{
description: "valid label selector args, no errors",
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
},
},
expectError: false,
},
{
description: "invalid label selector args, expects errors",
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Operator: metav1.LabelSelectorOpIn,
},
},
},
},
expectError: true,
},
{
description: "valid constraints args, no errors",
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
},
expectError: false,
},
{
description: "invalid constraints args, expects errors",
args: &RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{"foo"},
},
expectError: true,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemovePodsViolatingTopologySpreadConstraintArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
}
})
}
}

View File

@@ -22,6 +22,7 @@ limitations under the License.
package removepodsviolatingtopologyspreadconstraint
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
@@ -41,6 +42,16 @@ func (in *RemovePodsViolatingTopologySpreadConstraintArgs) DeepCopyInto(out *Rem
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Constraints != nil {
in, out := &in.Constraints, &out.Constraints
*out = make([]corev1.UnsatisfiableConstraintAction, len(*in))
copy(*out, *in)
}
if in.TopologyBalanceNodeFit != nil {
in, out := &in.TopologyBalanceNodeFit, &out.TopologyBalanceNodeFit
*out = new(bool)
**out = **in
}
return
}

View File

@@ -18,12 +18,15 @@ import (
"fmt"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/tracing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/errors"
@@ -113,10 +116,10 @@ type profileImpl struct {
preEvictionFilterPlugins []preEvictionFilterPlugin
// Each extension point with a list of plugins implementing the extension point.
deschedule sets.String
balance sets.String
filter sets.String
preEvictionFilter sets.String
deschedule sets.Set[string]
balance sets.Set[string]
filter sets.Set[string]
preEvictionFilter sets.Set[string]
}
// Option for the handleImpl.
@@ -184,10 +187,10 @@ func buildPlugin(config api.DeschedulerProfile, pluginName string, handle *handl
}
func (p *profileImpl) registryToExtensionPoints(registry pluginregistry.Registry) {
p.deschedule = sets.NewString()
p.balance = sets.NewString()
p.filter = sets.NewString()
p.preEvictionFilter = sets.NewString()
p.deschedule = sets.New[string]()
p.balance = sets.New[string]()
p.filter = sets.New[string]()
p.preEvictionFilter = sets.New[string]()
for plugin, pluginUtilities := range registry {
if _, ok := pluginUtilities.PluginType.(frameworktypes.DeschedulePlugin); ok {
@@ -232,16 +235,16 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
pi.registryToExtensionPoints(reg)
if !pi.deschedule.HasAll(config.Plugins.Deschedule.Enabled...) {
return nil, fmt.Errorf("profile %q configures deschedule extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.Deschedule.Enabled...).Difference(pi.deschedule))
return nil, fmt.Errorf("profile %q configures deschedule extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.Deschedule.Enabled...).Difference(pi.deschedule))
}
if !pi.balance.HasAll(config.Plugins.Balance.Enabled...) {
return nil, fmt.Errorf("profile %q configures balance extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.Balance.Enabled...).Difference(pi.balance))
return nil, fmt.Errorf("profile %q configures balance extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.Balance.Enabled...).Difference(pi.balance))
}
if !pi.filter.HasAll(config.Plugins.Filter.Enabled...) {
return nil, fmt.Errorf("profile %q configures filter extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.Filter.Enabled...).Difference(pi.filter))
return nil, fmt.Errorf("profile %q configures filter extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.Filter.Enabled...).Difference(pi.filter))
}
if !pi.preEvictionFilter.HasAll(config.Plugins.PreEvictionFilter.Enabled...) {
return nil, fmt.Errorf("profile %q configures preEvictionFilter extension point of non-existing plugins: %v", config.Name, sets.NewString(config.Plugins.PreEvictionFilter.Enabled...).Difference(pi.preEvictionFilter))
return nil, fmt.Errorf("profile %q configures preEvictionFilter extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.PreEvictionFilter.Enabled...).Difference(pi.preEvictionFilter))
}
handle := &handleImpl{
@@ -258,7 +261,7 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
pluginNames = append(pluginNames, config.Plugins.PreEvictionFilter.Enabled...)
plugins := make(map[string]frameworktypes.Plugin)
for _, plugin := range sets.NewString(pluginNames...).List() {
for _, plugin := range sets.New(pluginNames...).UnsortedList() {
pg, err := buildPlugin(config, plugin, handle, reg)
if err != nil {
return nil, fmt.Errorf("unable to build %v plugin: %v", plugin, err)
@@ -300,6 +303,9 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
errs := []error{}
for _, pl := range d.deschedulePlugins {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, pl.Name(), trace.WithAttributes(attribute.String("plugin", pl.Name()), attribute.String("profile", d.profileName), attribute.String("operation", tracing.DescheduleOperation)))
defer span.End()
evicted := d.podEvictor.TotalEvicted()
// TODO: strategyName should be accessible from within the strategy using a framework
// handle or function which the Evictor has access to. For migration/in-progress framework
@@ -311,6 +317,7 @@ func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node)
metrics.DeschedulerStrategyDuration.With(map[string]string{"strategy": pl.Name(), "profile": d.profileName}).Observe(time.Since(strategyStart).Seconds())
if status != nil && status.Err != nil {
span.AddEvent("Plugin Execution Failed", trace.WithAttributes(attribute.String("err", status.Err.Error())))
errs = append(errs, fmt.Errorf("plugin %q finished with error: %v", pl.Name(), status.Err))
}
klog.V(1).InfoS("Total number of pods evicted", "extension point", "Deschedule", "evictedPods", d.podEvictor.TotalEvicted()-evicted)
@@ -329,6 +336,9 @@ func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node)
func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
errs := []error{}
for _, pl := range d.balancePlugins {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, pl.Name(), trace.WithAttributes(attribute.String("plugin", pl.Name()), attribute.String("profile", d.profileName), attribute.String("operation", tracing.BalanceOperation)))
defer span.End()
evicted := d.podEvictor.TotalEvicted()
// TODO: strategyName should be accessible from within the strategy using a framework
// handle or function which the Evictor has access to. For migration/in-progress framework
@@ -340,6 +350,7 @@ func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *f
metrics.DeschedulerStrategyDuration.With(map[string]string{"strategy": pl.Name(), "profile": d.profileName}).Observe(time.Since(strategyStart).Seconds())
if status != nil && status.Err != nil {
span.AddEvent("Plugin Execution Failed", trace.WithAttributes(attribute.String("err", status.Err.Error())))
errs = append(errs, fmt.Errorf("plugin %q finished with error: %v", pl.Name(), status.Err))
}
klog.V(1).InfoS("Total number of pods evicted", "extension point", "Balance", "evictedPods", d.podEvictor.TotalEvicted()-evicted)

View File

@@ -444,19 +444,19 @@ func TestProfileExtensionPoints(t *testing.T) {
// Validate the extension points of all registered plugins are properly detected
diff := cmp.Diff(sets.NewString("DeschedulePlugin_0", "DeschedulePlugin_1", "DeschedulePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.deschedule)
diff := cmp.Diff(sets.New("DeschedulePlugin_0", "DeschedulePlugin_1", "DeschedulePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.deschedule)
if diff != "" {
t.Errorf("check for deschedule failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}
diff = cmp.Diff(sets.NewString("BalancePlugin_0", "BalancePlugin_1", "BalancePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.balance)
diff = cmp.Diff(sets.New("BalancePlugin_0", "BalancePlugin_1", "BalancePlugin_2", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2"), prfl.balance)
if diff != "" {
t.Errorf("check for balance failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}
diff = cmp.Diff(sets.NewString("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.filter)
diff = cmp.Diff(sets.New("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.filter)
if diff != "" {
t.Errorf("check for filter failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}
diff = cmp.Diff(sets.NewString("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.preEvictionFilter)
diff = cmp.Diff(sets.New("DefaultEvictor", "FakePlugin_0", "FakePlugin_1", "FakePlugin_2", "FilterPlugin_0", "FilterPlugin_1", "FilterPlugin_2"), prfl.preEvictionFilter)
if diff != "" {
t.Errorf("check for preEvictionFilter failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}
@@ -467,7 +467,7 @@ func TestProfileExtensionPoints(t *testing.T) {
names = append(names, pl.Name())
}
sort.Strings(names)
diff = cmp.Diff(sets.NewString("FakePlugin_0"), sets.NewString(names...))
diff = cmp.Diff(sets.New("FakePlugin_0"), sets.New(names...))
if diff != "" {
t.Errorf("check for deschedule failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}
@@ -478,7 +478,7 @@ func TestProfileExtensionPoints(t *testing.T) {
names = append(names, pl.Name())
}
sort.Strings(names)
diff = cmp.Diff(sets.NewString(), sets.NewString(names...))
diff = cmp.Diff(sets.New[string](), sets.New(names...))
if diff != "" {
t.Errorf("check for balance failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}
@@ -489,7 +489,7 @@ func TestProfileExtensionPoints(t *testing.T) {
names = append(names, pl.Name())
}
sort.Strings(names)
diff = cmp.Diff(sets.NewString("DefaultEvictor", "FilterPlugin_0", "FilterPlugin_1"), sets.NewString(names...))
diff = cmp.Diff(sets.New("DefaultEvictor", "FilterPlugin_0", "FilterPlugin_1"), sets.New(names...))
if diff != "" {
t.Errorf("check for filter failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}

156
pkg/tracing/tracing.go Normal file
View File

@@ -0,0 +1,156 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tracing
import (
"context"
"crypto/x509"
"os"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/propagation"
sdkresource "go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc/credentials"
"k8s.io/klog/v2"
)
const (
// DefaultServiceName is the default service name used for tracing.
DefaultServiceName = "descheduler"
// DescheduleOperation is the operation name used for Deschedule() functions.
DescheduleOperation = "deschedule"
// BalanceOperation is the operation name used for Balance() functions.
BalanceOperation = "balance"
// EvictOperation is the operation name used for Evict() functions.
EvictOperation = "evict"
// TracerName is used to setup the named tracer
TracerName = "sigs.k8s.io/descheduler"
)
var (
tracer trace.Tracer
provider trace.TracerProvider
)
func init() {
provider = trace.NewNoopTracerProvider()
tracer = provider.Tracer(TracerName)
}
func Tracer() trace.Tracer {
return tracer
}
// NewTracerProvider creates a new trace provider with the given options.
func NewTracerProvider(ctx context.Context, endpoint, caCert, name, namespace string, sampleRate float64, fallbackToNoOpTracer bool) (err error) {
defer func(p trace.TracerProvider) {
if err != nil && !fallbackToNoOpTracer {
return
}
if err != nil && fallbackToNoOpTracer {
klog.ErrorS(err, "ran into an error trying to setup a trace provider. Falling back to NoOp provider")
err = nil
provider = trace.NewNoopTracerProvider()
}
otel.SetTextMapPropagator(propagation.TraceContext{})
otel.SetTracerProvider(provider)
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
klog.ErrorS(err, "got error from opentelemetry")
}))
tracer = otel.GetTracerProvider().Tracer(TracerName)
}(provider)
if endpoint == "" {
klog.V(2).Info("Did not find a trace collector endpoint defined. Switching to NoopTraceProvider")
provider = trace.NewNoopTracerProvider()
return
}
var opts []otlptracegrpc.Option
opts = append(opts, otlptracegrpc.WithEndpoint(endpoint))
var data []byte
if caCert != "" {
data, err = os.ReadFile(caCert)
if err != nil {
klog.ErrorS(err, "failed to extract ca certificate required to generate the transport credentials")
return err
}
pool := x509.NewCertPool()
if !pool.AppendCertsFromPEM(data) {
klog.Error("failed to create cert pool using the ca certificate provided for generating the transport credentials")
return errors.New("failed to append the provided CA certificate to the cert pool")
}
klog.Info("Enabling trace GRPC client in secure TLS mode")
opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewClientTLSFromCert(pool, "")))
} else {
klog.Info("Enabling trace GRPC client in insecure mode")
opts = append(opts, otlptracegrpc.WithInsecure())
}
client := otlptracegrpc.NewClient(opts...)
exporter, err := otlptrace.New(ctx, client)
if err != nil {
klog.ErrorS(err, "failed to create an instance of the trace exporter")
return err
}
if name == "" {
klog.V(5).InfoS("no name provided, using default service name for tracing", "name", DefaultServiceName)
name = DefaultServiceName
}
resourceOpts := []sdkresource.Option{sdkresource.WithAttributes(semconv.ServiceNameKey.String(name)), sdkresource.WithSchemaURL(semconv.SchemaURL), sdkresource.WithProcess()}
if namespace != "" {
resourceOpts = append(resourceOpts, sdkresource.WithAttributes(semconv.ServiceNamespaceKey.String(namespace)))
}
resource, err := sdkresource.New(ctx, resourceOpts...)
if err != nil {
klog.ErrorS(err, "failed to create traceable resource")
return err
}
spanProcessor := sdktrace.NewBatchSpanProcessor(exporter)
provider = sdktrace.NewTracerProvider(
sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(sampleRate))),
sdktrace.WithSpanProcessor(spanProcessor),
sdktrace.WithResource(resource),
)
klog.V(2).Info("Successfully setup trace provider")
return
}
// Shutdown shuts down the global trace exporter.
func Shutdown(ctx context.Context) error {
tp, ok := provider.(*sdktrace.TracerProvider)
if !ok {
return nil
}
if err := tp.Shutdown(ctx); err != nil {
otel.Handle(err)
klog.ErrorS(err, "failed to shutdown the trace exporter")
return err
}
return nil
}
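For orientation only (not part of the diff), here is a minimal sketch of how a caller might wire these entry points together; the endpoint, namespace, and sample rate are placeholder assumptions, and the package is assumed to be imported as "tracing":
// Illustrative sketch, not descheduler's actual wiring.
func setupAndTrace(ctx context.Context) error {
	// The final argument asks NewTracerProvider to fall back to a no-op provider on error.
	if err := tracing.NewTracerProvider(ctx, "otel-collector:4317", "", tracing.DefaultServiceName, "kube-system", 1.0, true); err != nil {
		return err
	}
	defer tracing.Shutdown(ctx)
	// Spans are started from the package-level tracer.
	ctx, span := tracing.Tracer().Start(ctx, tracing.DescheduleOperation)
	defer span.End()
	_ = ctx // the derived context would be passed to downstream work
	return nil
}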


@@ -213,3 +213,28 @@ func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool {
}
return false
}
// PodHasNodeAffinity returns true if the pod has a node affinity of type
// `nodeAffinityType` defined. The nodeAffinityType param can take one of these two values:
// "requiredDuringSchedulingIgnoredDuringExecution" or "preferredDuringSchedulingIgnoredDuringExecution".
func PodHasNodeAffinity(pod *v1.Pod, nodeAffinityType NodeAffinityType) bool {
if pod.Spec.Affinity == nil {
return false
}
if pod.Spec.Affinity.NodeAffinity == nil {
return false
}
if nodeAffinityType == RequiredDuringSchedulingIgnoredDuringExecution {
return pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil
} else if nodeAffinityType == PreferredDuringSchedulingIgnoredDuringExecution {
return len(pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution) > 0
}
return false
}
type NodeAffinityType string
const (
RequiredDuringSchedulingIgnoredDuringExecution NodeAffinityType = "requiredDuringSchedulingIgnoredDuringExecution"
PreferredDuringSchedulingIgnoredDuringExecution NodeAffinityType = "preferredDuringSchedulingIgnoredDuringExecution"
)
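A hedged usage sketch (not from the diff): the helper and the two constants give callers a cheap pre-check before doing any affinity work. The identifiers are assumed to be in scope from the package shown above; pod is an arbitrary *v1.Pod.
// Illustrative only: report which node-affinity flavours a pod declares.
func affinityFlavours(pod *v1.Pod) (hard, soft bool) {
	hard = PodHasNodeAffinity(pod, RequiredDuringSchedulingIgnoredDuringExecution)
	soft = PodHasNodeAffinity(pod, PreferredDuringSchedulingIgnoredDuringExecution)
	return hard, soft
}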


@@ -275,3 +275,27 @@ func TolerationsEqual(t1, t2 []v1.Toleration) bool {
}
return true
}
// GetNodeWeightGivenPodPreferredAffinity returns the weight that the pod gives to a node by
// analyzing the soft node affinity of that pod
// (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
func GetNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, node *v1.Node) (int32, error) {
if !PodHasNodeAffinity(pod, PreferredDuringSchedulingIgnoredDuringExecution) {
return 0, nil
}
// Iterate over each PreferredSchedulingTerm and check if it matches with the current node labels.
// If so, add the weight of the PreferredSchedulingTerm to the sum of weight. With that, we'll know
// the weight that the nodeAffinity from this pod gives to this node.
var sumWeights int32 = 0
for _, prefSchedulTerm := range pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
preferredNodeSelector := &v1.NodeSelector{NodeSelectorTerms: []v1.NodeSelectorTerm{prefSchedulTerm.Preference}}
match, err := corev1.MatchNodeSelectorTerms(node, preferredNodeSelector)
if err != nil {
klog.ErrorS(err, "error parsing node selector", "selector", preferredNodeSelector)
}
if match {
sumWeights += prefSchedulTerm.Weight
}
}
return sumWeights, nil
}
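As a worked example of the summation above (hedged; the numbers mirror the table-driven test later in this diff, not any plugin logic): a pod preferring key1=value1 with weight 10 and key2=value2 with weight 5 scores 15 on a node carrying both labels, 10 on a node carrying only the first, and 0 on a node carrying neither. A caller could then compare two placements; currentNode and candidateNode below are placeholders:
// Illustrative only: does candidateNode match more of the pod's preferred terms than currentNode?
currentWeight, errCur := GetNodeWeightGivenPodPreferredAffinity(pod, currentNode)
candidateWeight, errCand := GetNodeWeightGivenPodPreferredAffinity(pod, candidateNode)
if errCur == nil && errCand == nil && candidateWeight > currentWeight {
	// candidateNode satisfies the soft affinity with a higher total weight
}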


@@ -5,6 +5,7 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestUniqueSortTolerations(t *testing.T) {
@@ -938,3 +939,105 @@ func TestNodeSelectorTermsEqual(t *testing.T) {
})
}
}
func createNodeSelectorTerm(key, value string) v1.NodeSelectorTerm {
return v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: key,
Operator: "In",
Values: []string{value},
},
},
}
}
func TestPodNodeAffinityWeight(t *testing.T) {
defaultNode := v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"key1": "value1",
"key2": "value2",
"key3": "value3",
},
},
}
tests := []struct {
name string
affinity *v1.Affinity
expectedWeight int32
}{
{
name: "No affinity",
affinity: nil,
expectedWeight: 0,
},
{
name: "No node affinity",
affinity: &v1.Affinity{},
expectedWeight: 0,
},
{
name: "Empty preferred node affinity, but matching required node affinity",
affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
createNodeSelectorTerm("key1", "value1"),
},
},
},
},
expectedWeight: 0,
},
{
name: "Matching single key in preferred node affinity",
affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 10,
Preference: createNodeSelectorTerm("key1", "value1"),
},
{
Weight: 5,
Preference: createNodeSelectorTerm("key1", "valueX"),
},
},
},
},
expectedWeight: 10,
},
{
name: "Matching two keys in preferred node affinity",
affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Weight: 10,
Preference: createNodeSelectorTerm("key1", "value1"),
},
{
Weight: 5,
Preference: createNodeSelectorTerm("key2", "value2"),
},
},
},
},
expectedWeight: 15,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
pod := v1.Pod{}
pod.Spec.Affinity = test.affinity
totalWeight, err := GetNodeWeightGivenPodPreferredAffinity(&pod, &defaultNode)
if err != nil {
t.Errorf("found non-nil error: %v", err)
}
if totalWeight != test.expectedWeight {
t.Errorf("Expected total weight is %v but actual total weight is %v", test.expectedWeight, totalWeight)
}
})
}
}


@@ -17,8 +17,8 @@ const SystemCriticalPriority = 2 * int32(1000000000)
// GetNamespacesFromPodAffinityTerm returns a set of names
// according to the namespaces indicated in podAffinityTerm.
// If namespaces is empty it considers the given pod's namespace.
func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.String {
names := sets.String{}
func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.Set[string] {
names := sets.New[string]()
if len(podAffinityTerm.Namespaces) == 0 {
names.Insert(pod.Namespace)
} else {
@@ -29,7 +29,7 @@ func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffini
// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
// matches the namespace and selector defined by <affinityPod>`s <term>.
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool {
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.Set[string], selector labels.Selector) bool {
if !namespaces.Has(pod.Namespace) {
return false
}
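Hedged aside (not part of the diff): these hunks follow the apimachinery move from the string-specialized sets.String to the generic sets.Set[string]; the call surface stays essentially the same, for example:
// Illustrative only: old string-specialized constructor vs. the generic one used above.
old := sets.NewString("default", "kube-system")
names := sets.New[string]("default", "kube-system")
_ = old.Has("default")   // both types expose Has, Insert, Delete, Len, ...
_ = names.Has("default")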


@@ -6,7 +6,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
)
var supportedQoSComputeResources = sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory))
var supportedQoSComputeResources = sets.New(string(v1.ResourceCPU), string(v1.ResourceMemory))
// QOSList is a set of (resource name, QoS class) pairs.
type QOSList map[v1.ResourceName]v1.PodQOSClass
@@ -44,7 +44,7 @@ func GetPodQOS(pod *v1.Pod) v1.PodQOSClass {
}
}
// process limits
qosLimitsFound := sets.NewString()
qosLimitsFound := sets.New[string]()
for name, quantity := range container.Resources.Limits {
if !isSupportedQoSComputeResource(name) {
continue


@@ -86,7 +86,7 @@ func TestRemoveDuplicates(t *testing.T) {
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Always",
Image: "kubernetes/pause",
Image: "registry.k8s.io/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: utilpointer.Bool(false),


@@ -176,7 +176,7 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Always",
Image: "kubernetes/pause",
Image: "registry.k8s.io/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: utilpointer.Bool(false),


@@ -70,7 +70,7 @@ func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Never",
Image: "kubernetes/pause",
Image: "registry.k8s.io/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
@@ -331,7 +331,7 @@ func TestLowNodeUtilization(t *testing.T) {
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Never",
Image: "kubernetes/pause",
Image: "registry.k8s.io/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
@@ -1336,7 +1336,7 @@ func createBalancedPodForNodes(
Containers: []v1.Container{
{
Name: "pause",
Image: "kubernetes/pause",
Image: "registry.k8s.io/pause",
Resources: v1.ResourceRequirements{
Limits: needCreateResource,
Requests: needCreateResource,


@@ -87,7 +87,7 @@ func TestTooManyRestarts(t *testing.T) {
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Always",
Image: "kubernetes/pause",
Image: "registry.k8s.io/pause",
Command: []string{"/bin/sh"},
Args: []string{"-c", "sleep 1s && exit 1"},
Ports: []v1.ContainerPort{{ContainerPort: 80}},


@@ -103,7 +103,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
}
plugin, err := removepodsviolatingtopologyspreadconstraint.New(&removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
IncludeSoftConstraints: tc.constraint != v1.DoNotSchedule,
Constraints: []v1.UnsatisfiableConstraintAction{tc.constraint},
},
&frameworkfake.HandleImpl{
ClientsetImpl: clientSet,


@@ -25,16 +25,16 @@ SKIP_INSTALL=${SKIP_INSTALL:-}
if [ -n "$KIND_E2E" ]; then
# If we did not set SKIP_INSTALL
if [ -z "$SKIP_INSTALL" ]; then
K8S_VERSION=${KUBERNETES_VERSION:-v1.27.0}
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.18.0/kind-linux-amd64
K8S_VERSION=${KUBERNETES_VERSION:-v1.28.0}
curl -Lo kubectl https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.20.0/kind-linux-amd64
chmod +x kind-linux-amd64
mv kind-linux-amd64 kind
export PATH=$PATH:$PWD
kind create cluster --image kindest/node:${K8S_VERSION} --config=./hack/kind_config.yaml
fi
${CONTAINER_ENGINE:-docker} pull kubernetes/pause
kind load docker-image kubernetes/pause
${CONTAINER_ENGINE:-docker} pull registry.k8s.io/pause
kind load docker-image registry.k8s.io/pause
kind get kubeconfig > /tmp/admin.conf
export KUBECONFIG="/tmp/admin.conf"
mkdir -p ~/gopath/src/sigs.k8s.io/


@@ -0,0 +1,68 @@
/*
Package antlr implements the Go version of the ANTLR 4 runtime.
# The ANTLR Tool
ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
(or visitor) that makes it easy to respond to the recognition of phrases of interest.
# Code Generation
ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
runtime library, written specifically to support the generated code in the target language. This library is the
runtime for the Go target.
To generate code for the go target, it is generally recommended to place the source grammar files in a package of
their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
your IDE, or configuration in your CI system.
Here is a general template for an ANTLR based recognizer in Go:
.
├── myproject
├── parser
│ ├── mygrammar.g4
│ ├── antlr-4.12.0-complete.jar
│ ├── error_listeners.go
│ ├── generate.go
│ ├── generate.sh
├── go.mod
├── go.sum
├── main.go
└── main_test.go
Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
The generate.go file then looks like this:
package parser
//go:generate ./generate.sh
And the generate.sh file will look similar to this:
#!/bin/sh
alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
depending on whether you want visitors or listeners or any other ANTLR options.
From the command line at the root of your package “myproject” you can then simply issue the command:
go generate ./...
# Copyright Notice
Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
*/
package antlr


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -6,11 +6,24 @@ package antlr
import "sync"
// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
var ATNInvalidAltNumber int
// ATN represents an “[Augmented Transition Network]”, though in ANTLR the term
// “Augmented Recursive Transition Network” is generally used; some descriptions of a
// “[Recursive Transition Network]” also exist.
//
// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
//
// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {
// DecisionToState is the decision points for all rules, subrules, optional
// blocks, ()+, ()*, etc. Used to build DFA predictors for them.
// blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
// can go back later and build DFA predictors for them. This includes
// all the rules, subrules, optional blocks, ()+, ()* etc...
DecisionToState []DecisionState
// grammarType is the ATN type and is used for deserializing ATNs from strings.
@@ -45,6 +58,8 @@ type ATN struct {
edgeMu sync.RWMutex
}
// NewATN returns a new ATN struct representing the given grammarType and is used
// for runtime deserialization of ATNs from the code generated by the ANTLR tool
func NewATN(grammarType int, maxTokenType int) *ATN {
return &ATN{
grammarType: grammarType,
@@ -53,7 +68,7 @@ func NewATN(grammarType int, maxTokenType int) *ATN {
}
}
// NextTokensInContext computes the set of valid tokens that can occur starting
// NextTokensInContext computes and returns the set of valid tokens that can occur starting
// in state s. If ctx is nil, the set of tokens will not include what can follow
// the rule surrounding s. In other words, the set will be restricted to tokens
// reachable staying within the rule of s.
@@ -61,8 +76,8 @@ func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
return NewLL1Analyzer(a).Look(s, nil, ctx)
}
// NextTokensNoContext computes the set of valid tokens that can occur starting
// in s and staying in same rule. Token.EPSILON is in set if we reach end of
// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of
// rule.
func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
a.mu.Lock()
@@ -76,6 +91,8 @@ func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
return iset
}
// NextTokens computes and returns the set of valid tokens starting in state s, by
// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil).
func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
if ctx == nil {
return a.NextTokensNoContext(s)


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -8,19 +8,14 @@ import (
"fmt"
)
type comparable interface {
equals(other interface{}) bool
}
// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive at the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig interface {
comparable
hash() int
Equals(o Collectable[ATNConfig]) bool
Hash() int
GetState() ATNState
GetAlt() int
@@ -47,7 +42,7 @@ type BaseATNConfig struct {
reachesIntoOuterContext int
}
func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
return &BaseATNConfig{
state: old.state,
alt: old.alt,
@@ -135,11 +130,16 @@ func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
b.reachesIntoOuterContext = v
}
// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
// for a collection.
//
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (b *BaseATNConfig) equals(o interface{}) bool {
func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
if b == o {
return true
} else if o == nil {
return false
}
var other, ok = o.(*BaseATNConfig)
@@ -153,30 +153,32 @@ func (b *BaseATNConfig) equals(o interface{}) bool {
if b.context == nil {
equal = other.context == nil
} else {
equal = b.context.equals(other.context)
equal = b.context.Equals(other.context)
}
var (
nums = b.state.GetStateNumber() == other.state.GetStateNumber()
alts = b.alt == other.alt
cons = b.semanticContext.equals(other.semanticContext)
cons = b.semanticContext.Equals(other.semanticContext)
sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
)
return nums && alts && cons && sups && equal
}
func (b *BaseATNConfig) hash() int {
// Hash is the default hash function for BaseATNConfig, when no specialist hash function
// is required for a collection
func (b *BaseATNConfig) Hash() int {
var c int
if b.context != nil {
c = b.context.hash()
c = b.context.Hash()
}
h := murmurInit(7)
h = murmurUpdate(h, b.state.GetStateNumber())
h = murmurUpdate(h, b.alt)
h = murmurUpdate(h, c)
h = murmurUpdate(h, b.semanticContext.hash())
h = murmurUpdate(h, b.semanticContext.Hash())
return murmurFinish(h, 4)
}
@@ -243,7 +245,9 @@ func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *Lex
return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}
func (l *LexerATNConfig) hash() int {
// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (l *LexerATNConfig) Hash() int {
var f int
if l.passedThroughNonGreedyDecision {
f = 1
@@ -253,15 +257,20 @@ func (l *LexerATNConfig) hash() int {
h := murmurInit(7)
h = murmurUpdate(h, l.state.GetStateNumber())
h = murmurUpdate(h, l.alt)
h = murmurUpdate(h, l.context.hash())
h = murmurUpdate(h, l.semanticContext.hash())
h = murmurUpdate(h, l.context.Hash())
h = murmurUpdate(h, l.semanticContext.Hash())
h = murmurUpdate(h, f)
h = murmurUpdate(h, l.lexerActionExecutor.hash())
h = murmurUpdate(h, l.lexerActionExecutor.Hash())
h = murmurFinish(h, 6)
return h
}
func (l *LexerATNConfig) equals(other interface{}) bool {
// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
if l == other {
return true
}
var othert, ok = other.(*LexerATNConfig)
if l == other {
@@ -275,7 +284,7 @@ func (l *LexerATNConfig) equals(other interface{}) bool {
var b bool
if l.lexerActionExecutor != nil {
b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
} else {
b = othert.lexerActionExecutor != nil
}
@@ -284,10 +293,9 @@ func (l *LexerATNConfig) equals(other interface{}) bool {
return false
}
return l.BaseATNConfig.equals(othert.BaseATNConfig)
return l.BaseATNConfig.Equals(othert.BaseATNConfig)
}
func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
var ds, ok = target.(DecisionState)


@@ -1,24 +1,25 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import "fmt"
import (
"fmt"
)
type ATNConfigSet interface {
hash() int
Hash() int
Equals(o Collectable[ATNConfig]) bool
Add(ATNConfig, *DoubleDict) bool
AddAll([]ATNConfig) bool
GetStates() Set
GetStates() *JStore[ATNState, Comparator[ATNState]]
GetPredicates() []SemanticContext
GetItems() []ATNConfig
OptimizeConfigs(interpreter *BaseATNSimulator)
Equals(other interface{}) bool
Length() int
IsEmpty() bool
Contains(ATNConfig) bool
@@ -57,7 +58,7 @@ type BaseATNConfigSet struct {
// effectively doubles the number of objects associated with ATNConfigs. All
// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
// read-only because a set becomes a DFA state.
configLookup Set
configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
// configs is the added elements.
configs []ATNConfig
@@ -83,7 +84,7 @@ type BaseATNConfigSet struct {
// readOnly is whether it is read-only. Do not
// allow any code to manipulate the set if true because DFA states will point at
// sets and those must not change. It not protect other fields; conflictingAlts
// sets and those must not change. It does not protect other fields; conflictingAlts
// in particular, which is assigned after readOnly.
readOnly bool
@@ -104,7 +105,7 @@ func (b *BaseATNConfigSet) Alts() *BitSet {
func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
return &BaseATNConfigSet{
cachedHash: -1,
configLookup: newArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
fullCtx: fullCtx,
}
}
@@ -126,9 +127,11 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
b.dipsIntoOuterContext = true
}
existing := b.configLookup.Add(config).(ATNConfig)
existing, present := b.configLookup.Put(config)
if existing == config {
// The config was not already in the set
//
if !present {
b.cachedHash = -1
b.configs = append(b.configs, config) // Track order here
return true
@@ -154,11 +157,14 @@ func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
return true
}
func (b *BaseATNConfigSet) GetStates() Set {
states := newArray2DHashSet(nil, nil)
func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
// states uses the standard comparator provided by the ATNState instance
//
states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
for i := 0; i < len(b.configs); i++ {
states.Add(b.configs[i].GetState())
states.Put(b.configs[i].GetState())
}
return states
@@ -214,7 +220,34 @@ func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
return false
}
func (b *BaseATNConfigSet) Equals(other interface{}) bool {
// Compare is a hack function just to verify that adding DFA states to the known
// set works, so long as comparison of ATNConfigSets works. For that to work, we
// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
// know the order, so we do this inefficient hack. If this proves the point, then
// we can change the config set to a better structure.
func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
if len(b.configs) != len(bs.configs) {
return false
}
for _, c := range b.configs {
found := false
for _, c2 := range bs.configs {
if c.Equals(c2) {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
if b == other {
return true
} else if _, ok := other.(*BaseATNConfigSet); !ok {
@@ -224,15 +257,15 @@ func (b *BaseATNConfigSet) Equals(other interface{}) bool {
other2 := other.(*BaseATNConfigSet)
return b.configs != nil &&
// TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
b.fullCtx == other2.fullCtx &&
b.uniqueAlt == other2.uniqueAlt &&
b.conflictingAlts == other2.conflictingAlts &&
b.hasSemanticContext == other2.hasSemanticContext &&
b.dipsIntoOuterContext == other2.dipsIntoOuterContext
b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
b.Compare(other2)
}
func (b *BaseATNConfigSet) hash() int {
func (b *BaseATNConfigSet) Hash() int {
if b.readOnly {
if b.cachedHash == -1 {
b.cachedHash = b.hashCodeConfigs()
@@ -247,7 +280,7 @@ func (b *BaseATNConfigSet) hash() int {
func (b *BaseATNConfigSet) hashCodeConfigs() int {
h := 1
for _, config := range b.configs {
h = 31*h + config.hash()
h = 31*h + config.Hash()
}
return h
}
@@ -283,7 +316,7 @@ func (b *BaseATNConfigSet) Clear() {
b.configs = make([]ATNConfig, 0)
b.cachedHash = -1
b.configLookup = newArray2DHashSet(nil, equalATNConfigs)
b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
}
func (b *BaseATNConfigSet) FullContext() bool {
@@ -365,7 +398,8 @@ type OrderedATNConfigSet struct {
func NewOrderedATNConfigSet() *OrderedATNConfigSet {
b := NewBaseATNConfigSet(false)
b.configLookup = newArray2DHashSet(nil, nil)
// This set uses the standard Hash() and Equals() from ATNConfig
b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
return &OrderedATNConfigSet{BaseATNConfigSet: b}
}
@@ -375,7 +409,7 @@ func hashATNConfig(i interface{}) int {
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
hash = 31*hash + o.GetSemanticContext().hash()
hash = 31*hash + o.GetSemanticContext().Hash()
return hash
}
@@ -403,5 +437,5 @@ func equalATNConfigs(a, b interface{}) bool {
return false
}
return ai.GetSemanticContext().equals(bi.GetSemanticContext())
return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
}


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -49,7 +49,8 @@ type ATNState interface {
AddTransition(Transition, int)
String() string
hash() int
Hash() int
Equals(Collectable[ATNState]) bool
}
type BaseATNState struct {
@@ -123,7 +124,7 @@ func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
as.NextTokenWithinRule = v
}
func (as *BaseATNState) hash() int {
func (as *BaseATNState) Hash() int {
return as.stateNumber
}
@@ -131,7 +132,7 @@ func (as *BaseATNState) String() string {
return strconv.Itoa(as.stateNumber)
}
func (as *BaseATNState) equals(other interface{}) bool {
func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
if ot, ok := other.(ATNState); ok {
return as.stateNumber == ot.GetStateNumber()
}


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -331,10 +331,12 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
c.lazyInit()
c.Fill()
if interval == nil {
c.Fill()
interval = NewInterval(0, len(c.tokens)-1)
} else {
c.Sync(interval.Stop)
}
start := interval.Start


@@ -0,0 +1,147 @@
package antlr
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
// This file contains all the implementations of custom comparators used for generic collections when the
// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would
// put the comparators in the source file for the struct themselves, but given the organization of this code is
// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by
// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig
// collections requires three different comparators depending on what the collection is being used for. Collecting - pun intended -
// all the comparators here, makes it much easier to see which implementation of hash and equals is used by which collection.
// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations.
// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of
// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some
// type safety and avoid having to implement this for every type that we want to perform comparison on.
//
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
// allows us to use it in any collection instance that does not require a special hash or equals implementation.
type ObjEqComparator[T Collectable[T]] struct{}
var (
aStateEqInst = &ObjEqComparator[ATNState]{}
aConfEqInst = &ObjEqComparator[ATNConfig]{}
aConfCompInst = &ATNConfigComparator[ATNConfig]{}
atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
dfaStateEqInst = &ObjEqComparator[*DFAState]{}
semctxEqInst = &ObjEqComparator[SemanticContext]{}
atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
)
// Equals2 delegates to the Equals() method of type T
func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool {
return o1.Equals(o2)
}
// Hash1 delegates to the Hash() method of type T
func (c *ObjEqComparator[T]) Hash1(o T) int {
return o.Hash()
}
type SemCComparator[T Collectable[T]] struct{}
// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type ATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
if o1 == o2 {
return true
}
// If either are nil, but not both, then the result is false
//
if o1 == nil || o2 == nil {
return false
}
return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
o1.GetAlt() == o2.GetAlt() &&
o1.GetSemanticContext().Equals(o2.GetSemanticContext())
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
hash = 31*hash + o.GetSemanticContext().Hash()
return hash
}
// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets
type ATNAltConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
if o1 == o2 {
return true
}
// If either are nil, but not both, then the result is false
//
if o1 == nil || o2 == nil {
return false
}
return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
o1.GetContext().Equals(o2.GetContext())
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
h := murmurInit(7)
h = murmurUpdate(h, o.GetState().GetStateNumber())
h = murmurUpdate(h, o.GetContext().Hash())
return murmurFinish(h, 2)
}
// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
if o1 == o2 {
return true
}
// If either are nil, but not both, then the result is false
//
if o1 == nil || o2 == nil {
return false
}
return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() &&
o1.GetAlt() == o2.GetAlt() &&
o1.GetSemanticContext().Equals(o2.GetSemanticContext())
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
// delegates to the standard Hash() method of the ATNConfig type.
func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
return o.Hash()
}
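A hedged illustration (not part of the diff) of the delegation the file comment above describes; s1 and s2 are hypothetical *DFAState values:
// Illustrative only: ObjEqComparator forwards to the element's own Hash()/Equals().
func dfaStatesLookEqual(s1, s2 *DFAState) bool {
	_ = dfaStateEqInst.Hash1(s1)          // same result as s1.Hash()
	return dfaStateEqInst.Equals2(s1, s2) // same result as s1.Equals(s2)
}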


@@ -1,13 +1,9 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
"sort"
)
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
@@ -15,8 +11,15 @@ type DFA struct {
decision int
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there.
states map[int]*DFAState
// indicate whether it is there. Go maps handle key hashing and collisions well, but DFAState
// is an object and can't be used directly as the key the way it can in, say, Java
// and C#, where, if the hashcode is the same for two objects, Equals() is called against them
// to see if they really are the same object.
//
//
states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
numstates int
s0 *DFAState
@@ -29,7 +32,7 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
states: make(map[int]*DFAState),
states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
@@ -92,7 +95,8 @@ func (d *DFA) getPrecedenceDfa() bool {
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
d.setStates(make(map[int]*DFAState))
d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
d.numstates = 0
if precedenceDfa {
precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
@@ -117,38 +121,12 @@ func (d *DFA) setS0(s *DFAState) {
d.s0 = s
}
func (d *DFA) getState(hash int) (*DFAState, bool) {
s, ok := d.states[hash]
return s, ok
}
func (d *DFA) setStates(states map[int]*DFAState) {
d.states = states
}
func (d *DFA) setState(hash int, state *DFAState) {
d.states[hash] = state
}
func (d *DFA) numStates() int {
return len(d.states)
}
type dfaStateList []*DFAState
func (d dfaStateList) Len() int { return len(d) }
func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
// sortedStates returns the states in d sorted by their state number.
func (d *DFA) sortedStates() []*DFAState {
vs := make([]*DFAState, 0, len(d.states))
for _, v := range d.states {
vs = append(vs, v)
}
sort.Sort(dfaStateList(vs))
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
return i.stateNumber < j.stateNumber
})
return vs
}


@@ -1,4 +1,4 @@
// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

Some files were not shown because too many files have changed in this diff.