mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 13:29:11 +01:00

Compare commits


21 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
4d47bd7f36 Merge pull request #1335 from a7i/helm-v0.29.0
v0.29.0: bump helm chart and update manifests/docs
2024-01-02 19:00:04 +01:00
Amir Alavi
96657caf60 v0.29.0: bump helm chart and update manifests/docs
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2024-01-02 09:34:34 -05:00
Kubernetes Prow Robot
3cf2c03f69 Merge pull request #1336 from dongjiang1989/update-verify-gen
chores(verify-gen): fix verify-gen check
2024-01-02 14:59:51 +01:00
dongjiang1989
1715f4ff81 update verify gen
Signed-off-by: dongjiang1989 <dongjiang1989@126.com>
2024-01-02 16:11:36 +08:00
Kubernetes Prow Robot
3a631a3287 Merge pull request #1333 from a7i/CVE-2023-48795
`CVE-2023-48795`: bump k8s deps
2023-12-23 20:08:01 +01:00
Amir Alavi
4ee0740a13 github action: install correct go-version 2023-12-22 09:33:52 -05:00
Amir Alavi
992483d0fe CVE-2023-48795: bump k8s deps
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2023-12-22 08:48:37 -05:00
Kubernetes Prow Robot
8dc250892b Merge pull request #1322 from a7i/k8s-1.29
kubernetes 1.29: update deps and go-version
2023-12-22 09:53:06 +01:00
Amir Alavi
08356fc9fa bump golangci-lint 2023-12-21 16:47:40 -05:00
Amir Alavi
e5e931d333 kubernetes 1.29: update deps and go-version
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2023-12-20 23:15:07 -05:00
Kubernetes Prow Robot
835c6c9fa1 Merge pull request #1318 from a7i/podlifetime-imagepullbackoff
`PodLifeTime`: consider pods with container status `ImagePullBackOff`
2023-12-20 19:46:03 +01:00
Amir Alavi
8a06ed32e2 PodLifeTime: consider pods with container status ImagePullBackOff
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2023-12-18 13:41:06 -05:00
Kubernetes Prow Robot
14b67000d6 Merge pull request #1317 from SataQiu/fix-log-20231212
fix: topologySpreadConstraint fields in structured logs
2023-12-15 17:14:15 +01:00
Kubernetes Prow Robot
dbfcd19713 Merge pull request #1314 from dongjiang1989/dualstack-support
helm: allow configuration of ipFamilyPolicy
2023-12-12 14:54:17 +01:00
SataQiu
f598b8909a fix a bug where the structured log is not displayed correctly 2023-12-12 16:45:50 +08:00
dongjiang1989
e6c9dfa05d allow configuration of ipFamilyPolicy
Signed-off-by: dongjiang1989 <dongjiang1989@126.com>
2023-12-10 09:47:54 +08:00
Victor Gonzalez
f0f7ebbe9a helm: ability to specify security context for pod (#1311)
* helm: ability to specify security context for pod

* Update charts/descheduler/templates/cronjob.yaml

Co-authored-by: Amir Alavi <amiralavi7@gmail.com>

* Update charts/descheduler/templates/deployment.yaml

Co-authored-by: Amir Alavi <amiralavi7@gmail.com>

---------

Co-authored-by: Amir Alavi <amiralavi7@gmail.com>
2023-12-05 04:40:39 +01:00
Kubernetes Prow Robot
4d37ec91a2 Merge pull request #1305 from a7i/helm-v0.28.1-master
release v0.28.1: bump helm chart and images
2023-11-28 18:13:52 +01:00
Amir Alavi
0286871a56 release v0.28.1: bump helm chart and images
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2023-11-28 09:10:03 -05:00
Kubernetes Prow Robot
b2965c0724 Merge pull request #1292 from a7i/amir/k8s-0.28.4
chore: upgrade libs to kubernetes 0.28.4 and matching go version
2023-11-27 20:04:02 +01:00
Amir Alavi
89c7ddbaab chore: upgrade libs to kubernetes 0.28.4 and matching go version
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2023-11-27 10:09:04 -05:00
710 changed files with 29980 additions and 22246 deletions

View File

@@ -35,7 +35,7 @@ jobs:
- uses: actions/setup-go@v3
with:
go-version: '1.20.7'
go-version: '1.21.5'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1

View File

@@ -5,10 +5,10 @@ on:
jobs:
deploy:
strategy:
strategy:
matrix:
k8s-version: ["v1.28.0"]
descheduler-version: ["v0.28.1"]
k8s-version: ["v1.29.0"]
descheduler-version: ["v0.29.0"]
descheduler-api: ["v1alpha1", "v1alpha2"]
manifest: ["deployment"]
runs-on: ubuntu-latest
@@ -21,6 +21,10 @@ jobs:
node_image: kindest/node:${{ matrix.k8s-version }}
kubectl_version: ${{ matrix.k8s-version }}
config: test/kind-config.yaml
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: Build image
run: |
VERSION="dev" make dev-image

View File

@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.7
FROM golang:1.21.5
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .

View File

@@ -26,7 +26,7 @@ ARCHS = amd64 arm arm64
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
GOLANGCI_VERSION := v1.52.1
GOLANGCI_VERSION := v1.55.2
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
GOFUMPT_VERSION := v0.4.0
@@ -134,7 +134,7 @@ gen:
./hack/update-docs.sh
gen-docker:
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:1.20.7 gen
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:1.21.5 gen
verify-gen:
./hack/verify-conversions.sh

View File

@@ -38,6 +38,7 @@ that version's release branch, as listed below:
|Descheduler Version|Docs link|
|---|---|
|v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|
|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|
|v0.27.x|[`release-1.27`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.27/README.md)|
|v0.26.x|[`release-1.26`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.26/README.md)|
@@ -637,7 +638,7 @@ This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
You can also specify `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`
If a value for `states` or `podStatusPhases` is not specified,
Pods in any state (even `Running`) are considered for eviction.
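
For context, a minimal `DeschedulerPolicy` sketch that uses the newly allowed `ImagePullBackOff` state (assuming the v1alpha2 policy format used elsewhere in this README; the lifetime value is illustrative):

```
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: default
    pluginConfig:
      - name: "PodLifeTime"
        args:
          # illustrative: evict matching pods older than one hour
          maxPodLifeTimeSeconds: 3600
          states:
            - "ImagePullBackOff"
    plugins:
      deschedule:
        enabled:
          - "PodLifeTime"
```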

View File

@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.28.1
appVersion: 0.28.1
version: 0.29.0
appVersion: 0.29.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes

View File

@@ -88,6 +88,10 @@ spec:
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 12 }}
{{- end }}
volumes:
- name: policy-volume
configMap:

View File

@@ -68,6 +68,10 @@ spec:
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
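
As a usage sketch for the new `podSecurityContext` chart value referenced by both templates above (a hypothetical values override; the key mirrors the commented example in the chart's `values.yaml`):

```
# custom-values.yaml (hypothetical override file)
podSecurityContext:
  fsGroup: 1000
```

Because of the `{{- if .Values.podSecurityContext }}` guard, nothing is rendered when the value is left as the default empty map.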

View File

@@ -9,6 +9,12 @@ metadata:
namespace: {{ .Release.Namespace }}
spec:
clusterIP: None
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ toYaml .Values.service.ipFamilies | nindent 4 }}
{{- end }}
ports:
- name: http-metrics
port: 10258
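
A hedged example of the matching `service` values override enabling dual-stack (illustrative values; the keys are the ones the template above reads):

```
service:
  enabled: true
  ipFamilyPolicy: PreferDualStack
  ipFamilies:
    - IPv6
    - IPv4
```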

View File

@@ -32,6 +32,10 @@ securityContext:
runAsNonRoot: true
runAsUser: 1000
# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
podSecurityContext: {}
# fsGroup: 1000
nameOverride: ""
fullnameOverride: ""
@@ -185,6 +189,16 @@ livenessProbe:
service:
enabled: false
# @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
#
ipFamilyPolicy: ""
# @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
# E.g.
# ipFamilies:
# - IPv6
# - IPv4
ipFamilies: []
serviceMonitor:
enabled: false

View File

@@ -109,17 +109,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.28.1' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.29.0' | kubectl apply -f -
```
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.28.1' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.29.0' | kubectl apply -f -
```
Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.28.1' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.29.0' | kubectl apply -f -
```
## User Guide

View File

@@ -4,6 +4,7 @@ Starting with descheduler release v0.10.0 container images are available in the
Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
v0.27.1 | registry.k8s.io/descheduler/descheduler:v0.27.1 | AMD64<br>ARM64<br>ARMv7 |

go.mod
View File

@@ -1,6 +1,6 @@
module sigs.k8s.io/descheduler
go 1.20
go 1.21
require (
github.com/client9/misspell v0.3.4
@@ -12,16 +12,16 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
google.golang.org/grpc v1.59.0
k8s.io/api v0.28.4
k8s.io/apimachinery v0.28.4
k8s.io/apiserver v0.28.4
k8s.io/client-go v0.28.4
k8s.io/code-generator v0.28.4
k8s.io/component-base v0.28.4
k8s.io/component-helpers v0.28.4
google.golang.org/grpc v1.60.1
k8s.io/api v0.29.0
k8s.io/apimachinery v0.29.0
k8s.io/apiserver v0.29.0
k8s.io/client-go v0.29.0
k8s.io/code-generator v0.29.0
k8s.io/component-base v0.29.0
k8s.io/component-helpers v0.29.0
k8s.io/klog/v2 v2.110.1
k8s.io/utils v0.0.0-20231121161247-cf03d44ff3cf
k8s.io/utils v0.0.0-20231127182322-b307cd553661
sigs.k8s.io/mdtoc v1.1.0
)
@@ -38,10 +38,10 @@ require (
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
@@ -52,7 +52,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
github.com/google/cel-go v0.16.1 // indirect
github.com/google/cel-go v0.17.7 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.1 // indirect
@@ -75,9 +75,9 @@ require (
github.com/prometheus/procfs v0.10.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.etcd.io/etcd/api/v3 v3.5.10 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
go.etcd.io/etcd/client/v3 v3.5.10 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
@@ -85,33 +85,41 @@ require (
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.15.0 // indirect
golang.org/x/crypto v0.16.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.14.0 // indirect
golang.org/x/term v0.14.0 // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/oauth2 v0.13.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.8.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
golang.org/x/tools v0.16.1 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
k8s.io/kms v0.28.4 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
k8s.io/kms v0.29.0 // indirect
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
replace (
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
// CVE-2023-48795: https://github.com/kubernetes-sigs/descheduler/security/code-scanning/17
// TODO: remove this replace once the upstream dependency is updated to v0.29.1
k8s.io/api => k8s.io/api v0.0.0-20231220172311-84c476802242
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b
k8s.io/client-go => k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d
)

go.sum
View File

@@ -1,6 +1,8 @@
cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
@@ -22,6 +24,7 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -33,15 +36,17 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
@@ -57,27 +62,32 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo=
github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -85,13 +95,17 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
@@ -99,6 +113,7 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -109,6 +124,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -126,8 +142,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -142,10 +160,13 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -163,21 +184,30 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo=
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ=
go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI=
go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
@@ -201,6 +231,7 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
@@ -210,42 +241,52 @@ golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90te
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -257,22 +298,23 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
@@ -294,39 +336,39 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0=
k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8=
k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
k8s.io/apiserver v0.28.4 h1:BJXlaQbAU/RXYX2lRz+E1oPe3G3TKlozMMCZWu5GMgg=
k8s.io/apiserver v0.28.4/go.mod h1:Idq71oXugKZoVGUUL2wgBCTHbUR+FYTWa4rq9j4n23w=
k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY=
k8s.io/client-go v0.28.4/go.mod h1:0VDZFpgoZfelyP5Wqu0/r/TRYcLYuJ2U1KEeoaPa1N4=
k8s.io/code-generator v0.28.4 h1:tcOSNIZQvuAvXhOwpbuJkKbAABJQeyCcQBCN/3uI18c=
k8s.io/code-generator v0.28.4/go.mod h1:OQAfl6bZikQ/tK6faJ18Vyzo54rUII2NmjurHyiN1g4=
k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo=
k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU=
k8s.io/component-helpers v0.28.4 h1:+X9VXT5+jUsRdC26JyMZ8Fjfln7mSjgumafocE509C4=
k8s.io/component-helpers v0.28.4/go.mod h1:8LzMalOQ0K10tkBJWBWq8h0HTI9HDPx4WT3QvTFn9Ro=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/api v0.0.0-20231220172311-84c476802242 h1:0vL/RpEDBpP1Ib7D/+Gn1GcXRKoLfzHk39Bimd2kBNI=
k8s.io/api v0.0.0-20231220172311-84c476802242/go.mod h1:77+m1u5YUTiZfSm1ad6sO9fCphn5EYI5cFzfE0VKOuY=
k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b h1:Z+R5mA4fTuumBjSg84ZIY+W8Gi3zW50/iw58LL1tUwQ=
k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b/go.mod h1:HcS5UNnaHsno0i/Q7QQQzqFd5WS+W26bysH+Ez3FLxA=
k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o=
k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM=
k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d h1:Y5RdiizB/2/3NM2YcasfigRdLYEkSa0AslQLcUQGDMw=
k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d/go.mod h1:1ou2dbm4LO33nvBBLNRvuzwYldF1e1DAjhtXzRCjF6o=
k8s.io/code-generator v0.29.0 h1:2LQfayGDhaIlaamXjIjEQlCMy4JNCH9lrzas4DNW1GQ=
k8s.io/code-generator v0.29.0/go.mod h1:5bqIZoCxs2zTRKMWNYqyQWW/bajc+ah4rh0tMY8zdGA=
k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=
k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M=
k8s.io/component-helpers v0.29.0 h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU=
k8s.io/component-helpers v0.29.0/go.mod h1:j2coxVfmzTOXWSE6sta0MTgNSr572Dcx68F6DD+8fWc=
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks=
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/kms v0.28.4 h1:PMgY/3CQTWP9eIKmNQiTgjLIZ0ns6O+voagzD2/4mSg=
k8s.io/kms v0.28.4/go.mod h1:HL4/lR/bhjAJPbqycKtfhWiKh1Sp21cpHOL8P4oo87w=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20231121161247-cf03d44ff3cf h1:iTzha1p7Fi83476ypNSz8nV9iR9932jIIs26F7gNLsU=
k8s.io/utils v0.0.0-20231121161247-cf03d44ff3cf/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI=
k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA=
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e h1:snPmy96t93RredGRjKfMFt+gvxuVAncqSAyBveJtr4Q=
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@@ -1,9 +1,5 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
featureGates:
# beta as of 1.27 but we currently run e2e on 1.26
# this flag should be removed as part of Descheduler 0.29 release
MatchLabelKeysInPodTopologySpread: true
nodes:
- role: control-plane
- role: worker

View File

@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi

View File

@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi

View File

@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.28.1
image: registry.k8s.io/descheduler/descheduler:v0.29.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume

View File

@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.28.1
image: registry.k8s.io/descheduler/descheduler:v0.29.0
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"

View File

@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.28.1
image: registry.k8s.io/descheduler/descheduler:v0.29.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/events"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
@@ -149,6 +150,7 @@ func TestPodLifeTime(t *testing.T) {
ignorePvcPods bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
applyPodsFunc func(pods []*v1.Pod)
}{
{
description: "Two pods in the `dev` Namespace, 1 is new and 1 very is old. 1 should be evicted.",
@@ -303,7 +305,7 @@ func TestPodLifeTime(t *testing.T) {
},
pods: []*v1.Pod{p1, p2, p9},
nodes: []*v1.Node{node1},
maxPodsToEvictPerNamespace: func(i uint) *uint { return &i }(1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedEvictedPodCount: 1,
},
{
@@ -313,9 +315,47 @@ func TestPodLifeTime(t *testing.T) {
},
pods: []*v1.Pod{p1, p2, p9},
nodes: []*v1.Node{node1},
maxPodsToEvictPerNode: func(i uint) *uint { return &i }(1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
expectedEvictedPodCount: 1,
},
{
description: "1 pod with container status ImagePullBackOff should be evicted",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: &maxLifeTime,
States: []string{"ImagePullBackOff"},
},
pods: []*v1.Pod{p9},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
applyPodsFunc: func(pods []*v1.Pod) {
pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "ImagePullBackOff"},
},
},
}
},
},
{
description: "1 pod without ImagePullBackOff States should be ignored",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: &maxLifeTime,
States: []string{"ContainerCreating"},
},
pods: []*v1.Pod{p9},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
applyPodsFunc: func(pods []*v1.Pod) {
pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "ImagePullBackOff"},
},
},
}
},
},
}
for _, tc := range testCases {
@@ -323,6 +363,10 @@ func TestPodLifeTime(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if tc.applyPodsFunc != nil {
tc.applyPodsFunc(tc.pods)
}
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
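The hunks above replace the inline pointer-returning closure with the generic helper from k8s.io/utils/ptr (imported as utilptr). A minimal standalone sketch of the two equivalent forms, for illustration only:

package main

import (
    "fmt"

    utilptr "k8s.io/utils/ptr"
)

func main() {
    // Before: an immediately-invoked closure produced the *uint literal.
    before := func(i uint) *uint { return &i }(1)

    // After: the generic helper returns a pointer to its argument.
    after := utilptr.To[uint](1)

    fmt.Println(*before, *after) // 1 1
}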

View File

@@ -50,6 +50,9 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
// Container state reasons: https://github.com/kubernetes/kubernetes/blob/release-1.24/pkg/kubelet/kubelet_pods.go#L76-L79
"PodInitializing",
"ContainerCreating",
// containerStatuses[*].state.waiting.reason: ImagePullBackOff
"ImagePullBackOff",
)
if !podLifeTimeAllowedStates.HasAll(args.States...) {
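The validation above adds "ImagePullBackOff" to the allowed values for States, next to the pod-phase values and the other container waiting reasons. A minimal sketch of arguments that pass this check, using an illustrative stand-in for the real PodLifeTimeArgs type (which lives in the descheduler API packages):

package main

import "fmt"

// PodLifeTimeArgs here is a trimmed-down stand-in used only for illustration.
type PodLifeTimeArgs struct {
    MaxPodLifeTimeSeconds *uint
    States                []string
}

func main() {
    maxLifeTime := uint(600)
    // Pods whose containers are stuck in ImagePullBackOff can now be
    // selected for eviction once they exceed the configured lifetime.
    args := PodLifeTimeArgs{
        MaxPodLifeTimeSeconds: &maxLifeTime,
        States:                []string{"ImagePullBackOff"},
    }
    fmt.Printf("%+v\n", args)
}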

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -53,14 +53,15 @@ type topology struct {
// topologySpreadConstraint is an internal version for v1.TopologySpreadConstraint
// and where the selector is parsed.
// This mirrors scheduler: https://github.com/kubernetes/kubernetes/blob/release-1.28/pkg/scheduler/framework/plugins/podtopologyspread/common.go#L37
// Fields are exported for structured logging.
type topologySpreadConstraint struct {
maxSkew int32
topologyKey string
selector labels.Selector
nodeAffinityPolicy v1.NodeInclusionPolicy
nodeTaintsPolicy v1.NodeInclusionPolicy
podNodeAffinity nodeaffinity.RequiredNodeAffinity
podTolerations []v1.Toleration
MaxSkew int32
TopologyKey string
Selector labels.Selector
NodeAffinityPolicy v1.NodeInclusionPolicy
NodeTaintsPolicy v1.NodeInclusionPolicy
PodNodeAffinity nodeaffinity.RequiredNodeAffinity
PodTolerations []v1.Toleration
}
// RemovePodsViolatingTopologySpreadConstraint evicts pods which violate their topology spread constraints
@@ -180,9 +181,9 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
// pre-populate the topologyPair map with all the topologies available from the nodeMap
// (we can't just build it from existing pods' nodes because a topology may have 0 pods)
for _, node := range nodeMap {
if val, ok := node.Labels[tsc.topologyKey]; ok {
if val, ok := node.Labels[tsc.TopologyKey]; ok {
if matchNodeInclusionPolicies(tsc, node) {
constraintTopologies[topologyPair{key: tsc.topologyKey, value: val}] = make([]*v1.Pod, 0)
constraintTopologies[topologyPair{key: tsc.TopologyKey, value: val}] = make([]*v1.Pod, 0)
}
}
}
@@ -196,7 +197,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
continue
}
// 4. if the pod matches this TopologySpreadConstraint LabelSelector
if !tsc.selector.Matches(labels.Set(pod.Labels)) {
if !tsc.Selector.Matches(labels.Set(pod.Labels)) {
continue
}
@@ -206,12 +207,12 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) Balance(ctx context.Contex
// If ok is false, node is nil in which case node.Labels will panic. In which case a pod is yet to be scheduled. So it's safe to just continue here.
continue
}
nodeValue, ok := node.Labels[tsc.topologyKey]
nodeValue, ok := node.Labels[tsc.TopologyKey]
if !ok {
continue
}
// 6. create a topoPair with key as this TopologySpreadConstraint
topoPair := topologyPair{key: tsc.topologyKey, value: nodeValue}
topoPair := topologyPair{key: tsc.TopologyKey, value: nodeValue}
// 7. add the pod with key as this topoPair
constraintTopologies[topoPair] = append(constraintTopologies[topoPair], pod)
sumPods++
@@ -266,7 +267,7 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, tsc topologySpreadC
if len(pods) > maxDomainSize {
maxDomainSize = len(pods)
}
if int32(maxDomainSize-minDomainSize) > tsc.maxSkew {
if int32(maxDomainSize-minDomainSize) > tsc.MaxSkew {
return false
}
}
@@ -307,7 +308,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
topologyBalanceNodeFit := utilpointer.BoolDeref(d.args.TopologyBalanceNodeFit, true)
eligibleNodes := filterEligibleNodes(nodes, tsc)
nodesBelowIdealAvg := filterNodesBelowIdealAvg(eligibleNodes, sortedDomains, tsc.topologyKey, idealAvg)
nodesBelowIdealAvg := filterNodesBelowIdealAvg(eligibleNodes, sortedDomains, tsc.TopologyKey, idealAvg)
// i is the index for belowOrEqualAvg
// j is the index for aboveAvg
@@ -323,7 +324,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
skew := float64(len(sortedDomains[j].pods) - len(sortedDomains[i].pods))
// if k and j are within the maxSkew of each other, move to next belowOrEqualAvg
if int32(skew) <= tsc.maxSkew {
if int32(skew) <= tsc.MaxSkew {
i++
continue
}
@@ -337,7 +338,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
aboveAvg := math.Ceil(float64(len(sortedDomains[j].pods)) - idealAvg)
belowAvg := math.Ceil(idealAvg - float64(len(sortedDomains[i].pods)))
smallestDiff := math.Min(aboveAvg, belowAvg)
halfSkew := math.Ceil((skew - float64(tsc.maxSkew)) / 2)
halfSkew := math.Ceil((skew - float64(tsc.MaxSkew)) / 2)
movePods := int(math.Min(smallestDiff, halfSkew))
if movePods <= 0 {
i++
@@ -475,15 +476,15 @@ func filterEligibleNodes(nodes []*v1.Node, tsc topologySpreadConstraint) []*v1.N
}
func matchNodeInclusionPolicies(tsc topologySpreadConstraint, node *v1.Node) bool {
if tsc.nodeAffinityPolicy == v1.NodeInclusionPolicyHonor {
if tsc.NodeAffinityPolicy == v1.NodeInclusionPolicyHonor {
// We ignore parsing errors here for backwards compatibility.
if match, _ := tsc.podNodeAffinity.Match(node); !match {
if match, _ := tsc.PodNodeAffinity.Match(node); !match {
return false
}
}
if tsc.nodeTaintsPolicy == v1.NodeInclusionPolicyHonor {
if _, untolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, tsc.podTolerations, doNotScheduleTaintsFilterFunc()); untolerated {
if tsc.NodeTaintsPolicy == v1.NodeInclusionPolicyHonor {
if _, untolerated := v1helper.FindMatchingUntoleratedTaint(node.Spec.Taints, tsc.PodTolerations, doNotScheduleTaintsFilterFunc()); untolerated {
return false
}
}
@@ -510,19 +511,19 @@ func newTopologySpreadConstraint(constraint v1.TopologySpreadConstraint, pod *v1
}
tsc := topologySpreadConstraint{
maxSkew: constraint.MaxSkew,
topologyKey: constraint.TopologyKey,
selector: selector,
nodeAffinityPolicy: v1.NodeInclusionPolicyHonor, // If NodeAffinityPolicy is nil, we treat NodeAffinityPolicy as "Honor".
nodeTaintsPolicy: v1.NodeInclusionPolicyIgnore, // If NodeTaintsPolicy is nil, we treat NodeTaintsPolicy as "Ignore".
podNodeAffinity: nodeaffinity.GetRequiredNodeAffinity(pod),
podTolerations: pod.Spec.Tolerations,
MaxSkew: constraint.MaxSkew,
TopologyKey: constraint.TopologyKey,
Selector: selector,
NodeAffinityPolicy: v1.NodeInclusionPolicyHonor, // If NodeAffinityPolicy is nil, we treat NodeAffinityPolicy as "Honor".
NodeTaintsPolicy: v1.NodeInclusionPolicyIgnore, // If NodeTaintsPolicy is nil, we treat NodeTaintsPolicy as "Ignore".
PodNodeAffinity: nodeaffinity.GetRequiredNodeAffinity(pod),
PodTolerations: pod.Spec.Tolerations,
}
if constraint.NodeAffinityPolicy != nil {
tsc.nodeAffinityPolicy = *constraint.NodeAffinityPolicy
tsc.NodeAffinityPolicy = *constraint.NodeAffinityPolicy
}
if constraint.NodeTaintsPolicy != nil {
tsc.nodeTaintsPolicy = *constraint.NodeTaintsPolicy
tsc.NodeTaintsPolicy = *constraint.NodeTaintsPolicy
}
return tsc, nil
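One way to see why exporting these fields fixes the structured log output: structured log sinks serialize values much as encoding/json does, so a struct with only unexported fields renders as an empty object. A standalone sketch with illustrative types, not descheduler code:

package main

import (
    "encoding/json"
    "fmt"
)

type unexportedTSC struct {
    maxSkew     int32
    topologyKey string
}

type exportedTSC struct {
    MaxSkew     int32
    TopologyKey string
}

func main() {
    a, _ := json.Marshal(unexportedTSC{maxSkew: 2, topologyKey: "zone"})
    b, _ := json.Marshal(exportedTSC{MaxSkew: 2, TopologyKey: "zone"})
    fmt.Println(string(a)) // {}
    fmt.Println(string(b)) // {"MaxSkew":2,"TopologyKey":"zone"}
}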

View File

@@ -1608,20 +1608,20 @@ func TestCheckIdenticalConstraints(t *testing.T) {
selector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}})
newConstraintSame := topologySpreadConstraint{
maxSkew: 2,
topologyKey: "zone",
selector: selector.DeepCopySelector(),
MaxSkew: 2,
TopologyKey: "zone",
Selector: selector.DeepCopySelector(),
}
newConstraintDifferent := topologySpreadConstraint{
maxSkew: 3,
topologyKey: "node",
selector: selector.DeepCopySelector(),
MaxSkew: 3,
TopologyKey: "node",
Selector: selector.DeepCopySelector(),
}
namespaceTopologySpreadConstraint := []topologySpreadConstraint{
{
maxSkew: 2,
topologyKey: "zone",
selector: selector.DeepCopySelector(),
MaxSkew: 2,
TopologyKey: "zone",
Selector: selector.DeepCopySelector(),
},
}
testCases := []struct {

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -25,7 +25,7 @@ SKIP_INSTALL=${SKIP_INSTALL:-}
if [ -n "$KIND_E2E" ]; then
# If we did not set SKIP_INSTALL
if [ -z "$SKIP_INSTALL" ]; then
K8S_VERSION=${KUBERNETES_VERSION:-v1.28.0}
K8S_VERSION=${KUBERNETES_VERSION:-v1.29.0}
curl -Lo kubectl https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.20.0/kind-linux-amd64
chmod +x kind-linux-amd64

View File

@@ -1,10 +1,30 @@
# Change history of go-restful
## [v3.9.0] - 20221-07-21
## [v3.11.0] - 2023-08-19
- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
## [v3.10.2] - 2023-03-09 - DO NOT USE
- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0
see comment in Readme how to customize this behaviour.
## [v3.10.1] - 2022-11-19 - DO NOT USE
- fix broken 3.10.0 by using path package for joining paths
## [v3.10.0] - 2022-10-11 - BROKEN
- changed tokenizer to match std route match behavior; do not trimright the path (#511)
- Add MIME_ZIP (#512)
- Add MIME_ZIP and HEADER_ContentDisposition (#513)
- Changed how to get query parameter issue #510
## [v3.9.0] - 2022-07-21
- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)
## [v3.8.0] - 20221-06-06
## [v3.8.0] - 2022-06-06
- use exact matching of allowed domain entries, issue #489 (#493)
- this changes fixes [security] Authorization Bypass Through User-Controlled Key

View File

@@ -79,7 +79,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
@@ -96,6 +96,7 @@ There are several hooks to customize the behavior of the go-restful package.
- Compression
- Encoders for other serializers
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
## Resources
@@ -108,4 +109,4 @@ There are several hooks to customize the behavior of the go-restful package.
Type ```git shortlog -s``` for a full list of contributors.
© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome.
© 2012 - 2023, http://ernestmicklei.com. MIT License. Contributions are welcome.

View File

@@ -7,12 +7,14 @@ package restful
const (
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
HEADER_Allow = "Allow"
HEADER_Accept = "Accept"
HEADER_Origin = "Origin"
HEADER_ContentType = "Content-Type"
HEADER_ContentDisposition = "Content-Disposition"
HEADER_LastModified = "Last-Modified"
HEADER_AcceptEncoding = "Accept-Encoding"
HEADER_ContentEncoding = "Content-Encoding"

View File

@@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request {
// a "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// restful.DefaultRequestContentType(restful.MIME_JSON)
//
// restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
}
@@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string {
// QueryParameter returns the (first) Query parameter value by its name
func (r *Request) QueryParameter(name string) string {
return r.Request.FormValue(name)
return r.Request.URL.Query().Get(name)
}
// QueryParameters returns the all the query parameters values by name
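The QueryParameter change above stops using http.Request.FormValue, which for POST, PUT, and PATCH requests also consults the parsed form body, and reads the URL query string only (issue #510 in the changelog). A standalone stdlib sketch of the difference, with a hypothetical URL and form body:

package main

import (
    "fmt"
    "net/http"
    "net/url"
    "strings"
)

func main() {
    body := strings.NewReader(url.Values{"name": {"from-body"}}.Encode())
    req, _ := http.NewRequest(http.MethodPost, "http://example.test/users?name=from-url", body)
    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

    // Old behaviour: FormValue parses the form body and lets it take
    // precedence over the URL query string for POST requests.
    fmt.Println(req.FormValue("name")) // from-body

    // New behaviour: only the URL query string is consulted.
    fmt.Println(req.URL.Query().Get("name")) // from-url
}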

View File

@@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
if DefaultResponseMimeType == MIME_XML {
return entityAccessRegistry.accessorAt(MIME_XML)
}
if DefaultResponseMimeType == MIME_ZIP {
return entityAccessRegistry.accessorAt(MIME_ZIP)
}
// Fallback to whatever the route says it can produce.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
for _, each := range r.routeProduces {

View File

@@ -40,7 +40,8 @@ type Route struct {
ParameterDocs []*Parameter
ResponseErrors map[int]ResponseError
DefaultResponse *ResponseError
ReadSample, WriteSample interface{} // structs that model an example request or response payload
ReadSample, WriteSample interface{} // structs that model an example request or response payload
WriteSamples []interface{} // if more than one return types is possible (oneof) then this will contain multiple values
// Extra information used to store custom information about the route.
Metadata map[string]interface{}
@@ -164,7 +165,13 @@ func tokenizePath(path string) []string {
if "/" == path {
return nil
}
return strings.Split(strings.Trim(path, "/"), "/")
if TrimRightSlashEnabled {
// 3.9.0
return strings.Split(strings.Trim(path, "/"), "/")
} else {
// 3.10.2
return strings.Split(strings.TrimLeft(path, "/"), "/")
}
}
// for debugging
@@ -176,3 +183,9 @@ func (r *Route) String() string {
func (r *Route) EnableContentEncoding(enabled bool) {
r.contentEncodingEnabled = &enabled
}
// TrimRightSlashEnabled controls whether
// - path on route building is using path.Join
// - the path of the incoming request is trimmed of its slash suffux.
// Value of true matches the behavior of <= 3.9.0
var TrimRightSlashEnabled = true
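Importers can flip this package variable to choose the path strategy. A minimal usage sketch, assuming the v3 module path:

package main

import (
    "fmt"

    restful "github.com/emicklei/go-restful/v3"
)

func main() {
    // true (the default) keeps the <= v3.9.0 behaviour of trimming trailing
    // slashes; false switches route concatenation to path.Join and leaves
    // incoming request paths untrimmed, as in v3.10.x.
    restful.TrimRightSlashEnabled = false
    fmt.Println(restful.TrimRightSlashEnabled)
}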

View File

@@ -7,6 +7,7 @@ package restful
import (
"fmt"
"os"
"path"
"reflect"
"runtime"
"strings"
@@ -30,27 +31,29 @@ type RouteBuilder struct {
typeNameHandleFunc TypeNameHandleFunction // required
// documentation
doc string
notes string
operation string
readSample, writeSample interface{}
parameters []*Parameter
errorMap map[int]ResponseError
defaultResponse *ResponseError
metadata map[string]interface{}
extensions map[string]interface{}
deprecated bool
contentEncodingEnabled *bool
doc string
notes string
operation string
readSample interface{}
writeSamples []interface{}
parameters []*Parameter
errorMap map[int]ResponseError
defaultResponse *ResponseError
metadata map[string]interface{}
extensions map[string]interface{}
deprecated bool
contentEncodingEnabled *bool
}
// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
// func Returns500(b *RouteBuilder) {
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
// }
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
// func Returns500(b *RouteBuilder) {
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
// }
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
for _, each := range oneArgBlocks {
each(b)
@@ -133,9 +136,9 @@ func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
return p
}
// Writes tells what resource type will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
b.writeSample = sample
// Writes tells which one of the resource types will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(samples ...interface{}) *RouteBuilder {
b.writeSamples = samples // oneof
return b
}
@@ -340,19 +343,29 @@ func (b *RouteBuilder) Build() Route {
ResponseErrors: b.errorMap,
DefaultResponse: b.defaultResponse,
ReadSample: b.readSample,
WriteSample: b.writeSample,
WriteSamples: b.writeSamples,
Metadata: b.metadata,
Deprecated: b.deprecated,
contentEncodingEnabled: b.contentEncodingEnabled,
allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType,
}
// set WriteSample if one specified
if len(b.writeSamples) == 1 {
route.WriteSample = b.writeSamples[0]
}
route.Extensions = b.extensions
route.postBuild()
return route
}
func concatPath(path1, path2 string) string {
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
// merge two paths using the current (package global) merge path strategy.
func concatPath(rootPath, routePath string) string {
if TrimRightSlashEnabled {
return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/")
} else {
return path.Join(rootPath, routePath)
}
}
var anonymousFuncCount int32
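With the variadic Writes above, a route can declare several possible response payloads ("oneof"); Build() still fills Route.WriteSample when exactly one sample is passed. A usage sketch with hypothetical Cat and Dog payload types, again assuming the v3 module path:

package main

import (
    "net/http"

    restful "github.com/emicklei/go-restful/v3"
)

// Cat and Dog are hypothetical payloads used only to illustrate Writes.
type Cat struct{ Name string }
type Dog struct{ Name string }

func getPet(req *restful.Request, resp *restful.Response) {
    _ = resp.WriteHeaderAndEntity(http.StatusOK, Cat{Name: "whiskers"})
}

func main() {
    ws := new(restful.WebService)
    // The route may write either a Cat or a Dog entity.
    ws.Route(ws.GET("/pets/{id}").To(getPet).Writes(Cat{}, Dog{}))
    restful.Add(ws)
}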

vendor/github.com/fsnotify/fsnotify/.cirrus.yml (new file, generated, vendored, 13 lines)
View File

@@ -0,0 +1,13 @@
freebsd_task:
name: 'FreeBSD'
freebsd_instance:
image_family: freebsd-13-2
install_script:
- pkg update -f
- pkg install -y go
test_script:
# run tests as user "cirrus" instead of root
- pw useradd cirrus -m
- chown -R cirrus:cirrus .
- FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...

View File

@@ -4,3 +4,4 @@
# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe

View File

@@ -1,16 +1,87 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
Unreleased
----------
Nothing yet.
## [1.6.0] - 2022-10-13
1.7.0 - 2023-10-22
------------------
This version of fsnotify needs Go 1.17.
### Additions
- illumos: add FEN backend to support illumos and Solaris. ([#371])
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
in cases where you can't control the kernel buffer and receive a large number
of events in bursts. ([#550], [#572])
- all: add `AddWith()`, which is identical to `Add()` but allows passing
options. ([#521])
- windows: allow setting the ReadDirectoryChangesW() buffer size with
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
works on all platforms and is enough for most purposes, but in some cases a
highest buffer is needed. ([#521])
### Changes and fixes
- inotify: remove watcher if a watched path is renamed ([#518])
After a rename the reported name wasn't updated, or even an empty string.
Inotify doesn't provide any good facilities to update it, so just remove the
watcher. This is already how it worked on kqueue and FEN.
On Windows this does work, and remains working.
- windows: don't listen for file attribute changes ([#520])
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
with no way to see if they're a file write or attribute change, so would show
up as a fsnotify.Write event. This is never useful, and could result in many
spurious Write events.
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
Before it would merely return "short read", making it hard to detect this
error.
- kqueue: make sure events for all files are delivered properly when removing a
watched directory ([#526])
Previously they would get sent with `""` (empty string) or `"."` as the path
name.
- kqueue: don't emit spurious Create events for symbolic links ([#524])
The link would get resolved but kqueue would "forget" it already saw the link
itself, resulting on a Create for every Write event for the directory.
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
`backend_other.go`, making it easier to use on unsupported platforms such as
WASM, AIX, etc. ([#528])
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
Google AppEngine forbids usage of the unsafe package so the inotify backend
won't compile there.
[#371]: https://github.com/fsnotify/fsnotify/pull/371
[#516]: https://github.com/fsnotify/fsnotify/pull/516
[#518]: https://github.com/fsnotify/fsnotify/pull/518
[#520]: https://github.com/fsnotify/fsnotify/pull/520
[#521]: https://github.com/fsnotify/fsnotify/pull/521
[#524]: https://github.com/fsnotify/fsnotify/pull/524
[#525]: https://github.com/fsnotify/fsnotify/pull/525
[#526]: https://github.com/fsnotify/fsnotify/pull/526
[#528]: https://github.com/fsnotify/fsnotify/pull/528
[#537]: https://github.com/fsnotify/fsnotify/pull/537
[#550]: https://github.com/fsnotify/fsnotify/pull/550
[#572]: https://github.com/fsnotify/fsnotify/pull/572
1.6.0 - 2022-10-13
------------------
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
but not documented). It also increases the minimum Linux version to 2.6.32.
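A usage sketch of the buffered constructor described above, with the usual Events/Errors select loop; the watched path and buffer size are illustrative:

package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    // NewBufferedWatcher buffers the Events channel, which can help when
    // events arrive in bursts and the kernel-side buffers cannot be raised;
    // the unbuffered NewWatcher() remains the recommended default.
    w, err := fsnotify.NewBufferedWatcher(4096)
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    if err := w.Add("/tmp"); err != nil {
        log.Fatal(err)
    }

    for {
        select {
        case ev, ok := <-w.Events:
            if !ok {
                return
            }
            log.Println("event:", ev)
        case err, ok := <-w.Errors:
            if !ok {
                return
            }
            log.Println("error:", err)
        }
    }
}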

View File

@@ -1,29 +1,31 @@
fsnotify is a Go library to provide cross-platform filesystem notifications on
Windows, Linux, macOS, and BSD systems.
Windows, Linux, macOS, BSD, and illumos.
Go 1.16 or newer is required; the full documentation is at
Go 1.17 or newer is required; the full documentation is at
https://pkg.go.dev/github.com/fsnotify/fsnotify
**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
released version, whereas this README is for the last development version which
may include additions/changes.**
---
Platform support:
| Adapter | OS | Status |
| --------------------- | ---------------| -------------------------------------------------------------|
| inotify | Linux 2.6.32+ | Supported |
| kqueue | BSD, macOS | Supported |
| ReadDirectoryChangesW | Windows | Supported |
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
| Backend | OS | Status |
| :-------------------- | :--------- | :------------------------------------------------------------------------ |
| inotify | Linux | Supported |
| kqueue | BSD, macOS | Supported |
| ReadDirectoryChangesW | Windows | Supported |
| FEN | illumos | Supported |
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
Linux and macOS should include Android and iOS, but these are currently untested.
Linux and illumos should include Android and Solaris, but these are currently
untested.
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
Usage
-----
@@ -83,20 +85,23 @@ run with:
% go run ./cmd/fsnotify
Further detailed documentation can be found in godoc:
https://pkg.go.dev/github.com/fsnotify/fsnotify
FAQ
---
### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to.
### Are subdirectories watched too?
### Are subdirectories watched?
No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]).
[#18]: https://github.com/fsnotify/fsnotify/issues/18
### Do I have to watch the Error and Event channels in a goroutine?
As of now, yes (you can read both channels in the same goroutine using `select`,
you don't need a separate goroutine for both channels; see the example).
Yes. You can read both channels in the same goroutine using `select` (you don't
need a separate goroutine for both channels; see the example).
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from underlying OS to work. The current NFS and SMB
@@ -107,6 +112,32 @@ This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
[#9]: https://github.com/fsnotify/fsnotify/issues/9
### Why do I get many Chmod events?
Some programs may generate a lot of attribute changes; for example Spotlight on
macOS, anti-virus programs, backup applications, and some others are known to do
this. As a rule, it's typically best to ignore Chmod events. They're often not
useful, and tend to cause problems.
Spotlight indexing on macOS can result in multiple events (see [#15]). A
temporary workaround is to add your folder(s) to the *Spotlight Privacy
settings* until we have a native FSEvents implementation (see [#11]).
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15
### Watching a file doesn't work well
Watching individual files (rather than directories) is generally not recommended
as many programs (especially editors) update files atomically: it will write to
a temporary file which is then moved to to destination, overwriting the original
(or some variant thereof). The watcher on the original file is now lost, as that
no longer exists.
The upshot of this is that a power failure or crash won't leave a half-written
file.
Watch the parent directory and use `Event.Name` to filter out files you're not
interested in. There is an example of this in `cmd/fsnotify/file.go`.
Platform-specific notes
-----------------------
### Linux
@@ -151,11 +182,3 @@ these platforms.
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files.
### macOS
Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
have a native FSEvents implementation (see [#11]).
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15

View File

@@ -1,10 +1,19 @@
//go:build solaris
// +build solaris
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"sync"
"golang.org/x/sys/unix"
)
// Watcher watches a set of paths, delivering events on a channel.
@@ -17,9 +26,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -33,16 +42,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -58,14 +67,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
// # Windows notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all times, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -92,44 +107,129 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// and on kqueue when a file is truncated. On Windows
// it's never sent.
// when a file is truncated. On Windows it's never
// sent.
Events chan Event
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error
mu sync.Mutex
port *unix.EventPort
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
dirs map[string]struct{} // Explicitly watched directories
watches map[string]struct{} // Explicitly watched non-directories
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
return NewBufferedWatcher(0)
}
// Close removes all watches and closes the events channel.
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
w := &Watcher{
Events: make(chan Event, sz),
Errors: make(chan error),
dirs: make(map[string]struct{}),
watches: make(map[string]struct{}),
done: make(chan struct{}),
}
var err error
w.port, err = unix.NewEventPort()
if err != nil {
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
}
go w.readEvents()
return w, nil
}
// sendEvent attempts to send an event to the user, returning true if the event
// was put in the channel successfully and false if the watcher has been closed.
func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
select {
case w.Events <- Event{Name: name, Op: op}:
return true
case <-w.done:
return false
}
}
// sendError attempts to send an error to the user, returning true if the error
// was put in the channel successfully and false if the watcher has been closed.
func (w *Watcher) sendError(err error) (sent bool) {
select {
case w.Errors <- err:
return true
case <-w.done:
return false
}
}
func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
return nil
// Take the lock used by associateFile to prevent lingering events from
// being processed after the close
w.mu.Lock()
defer w.mu.Unlock()
if w.isClosed() {
return nil
}
close(w.done)
return w.port.Close()
}
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -139,15 +239,63 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to to destination removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to to destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
if w.port.PathIsWatched(name) {
return nil
}
_ = getOptions(opts...)
// Currently we resolve symlinks that were explicitly requested to be
// watched. Otherwise we would use LStat here.
stat, err := os.Stat(name)
if err != nil {
return err
}
// Associate all files in the directory.
if stat.IsDir() {
err := w.handleDirectory(name, stat, true, w.associateFile)
if err != nil {
return err
}
w.mu.Lock()
w.dirs[name] = struct{}{}
w.mu.Unlock()
return nil
}
err = w.associateFile(name, stat, true)
if err != nil {
return err
}
w.mu.Lock()
w.watches[name] = struct{}{}
w.mu.Unlock()
return nil
}
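A usage sketch of AddWith with an option, assuming the WithBufferSize helper referenced in the doc comment above; on this FEN backend (and on every backend except Windows) the option is a no-op:

package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    w, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    // WithBufferSize only affects the Windows ReadDirectoryChangesW buffer;
    // elsewhere AddWith("/tmp", ...) behaves exactly like Add("/tmp").
    if err := w.AddWith("/tmp", fsnotify.WithBufferSize(128*1024)); err != nil {
        log.Fatal(err)
    }
}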
@@ -157,6 +305,336 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
if w.isClosed() {
return nil
}
if !w.port.PathIsWatched(name) {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
// The user has expressed an intent. Immediately remove this name from
// whichever watch list it might be in. If it's not in there the delete
// doesn't cause harm.
w.mu.Lock()
delete(w.watches, name)
delete(w.dirs, name)
w.mu.Unlock()
stat, err := os.Stat(name)
if err != nil {
return err
}
// Remove associations for every file in the directory.
if stat.IsDir() {
err := w.handleDirectory(name, stat, false, w.dissociateFile)
if err != nil {
return err
}
return nil
}
err = w.port.DissociatePath(name)
if err != nil {
return err
}
return nil
}
// readEvents contains the main loop that runs in a goroutine watching for events.
func (w *Watcher) readEvents() {
// If this function returns, the watcher has been closed and we can close
// these channels
defer func() {
close(w.Errors)
close(w.Events)
}()
pevents := make([]unix.PortEvent, 8)
for {
count, err := w.port.Get(pevents, 1, nil)
if err != nil && err != unix.ETIME {
// Interrupted system call (count should be 0) ignore and continue
if errors.Is(err, unix.EINTR) && count == 0 {
continue
}
// Get failed because we called w.Close()
if errors.Is(err, unix.EBADF) && w.isClosed() {
return
}
// There was an error not caused by calling w.Close()
if !w.sendError(err) {
return
}
}
p := pevents[:count]
for _, pevent := range p {
if pevent.Source != unix.PORT_SOURCE_FILE {
// Event from unexpected source received; should never happen.
if !w.sendError(errors.New("Event from unexpected source received")) {
return
}
continue
}
err = w.handleEvent(&pevent)
if err != nil {
if !w.sendError(err) {
return
}
}
}
}
}
func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
files, err := os.ReadDir(path)
if err != nil {
return err
}
// Handle all children of the directory.
for _, entry := range files {
finfo, err := entry.Info()
if err != nil {
return err
}
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
if err != nil {
return err
}
}
// And finally handle the directory itself.
return handler(path, stat, follow)
}
// handleEvent might need to emit more than one fsnotify event if the events
// bitmap matches more than one event type (e.g. the file was both modified and
// had the attributes changed between when the association was created and the
// when event was returned)
func (w *Watcher) handleEvent(event *unix.PortEvent) error {
var (
events = event.Events
path = event.Path
fmode = event.Cookie.(os.FileMode)
reRegister = true
)
w.mu.Lock()
_, watchedDir := w.dirs[path]
_, watchedPath := w.watches[path]
w.mu.Unlock()
isWatched := watchedDir || watchedPath
if events&unix.FILE_DELETE != 0 {
if !w.sendEvent(path, Remove) {
return nil
}
reRegister = false
}
if events&unix.FILE_RENAME_FROM != 0 {
if !w.sendEvent(path, Rename) {
return nil
}
// Don't keep watching the new file name
reRegister = false
}
if events&unix.FILE_RENAME_TO != 0 {
// We don't report a Rename event for this case, because Rename events
// are interpreted as referring to the _old_ name of the file, and in
// this case the event would refer to the new name of the file. This
// type of rename event is not supported by fsnotify.
// inotify reports a Remove event in this case, so we simulate this
// here.
if !w.sendEvent(path, Remove) {
return nil
}
// Don't keep watching the file that was removed
reRegister = false
}
// The file is gone, nothing left to do.
if !reRegister {
if watchedDir {
w.mu.Lock()
delete(w.dirs, path)
w.mu.Unlock()
}
if watchedPath {
w.mu.Lock()
delete(w.watches, path)
w.mu.Unlock()
}
return nil
}
// If we didn't get a deletion the file still exists and we're going to have
// to watch it again. Let's Stat it now so that we can compare permissions
// and have what we need to continue watching the file
stat, err := os.Lstat(path)
if err != nil {
// This is unexpected, but we should still emit an event. This happens
// most often on "rm -r" of a subdirectory inside a watched directory We
// get a modify event of something happening inside, but by the time we
// get here, the sudirectory is already gone. Clearly we were watching
// this path but now it is gone. Let's tell the user that it was
// removed.
if !w.sendEvent(path, Remove) {
return nil
}
// Suppress extra write events on removed directories; they are not
// informative and can be confusing.
return nil
}
// resolve symlinks that were explicitly watched as we would have at Add()
// time. this helps suppress spurious Chmod events on watched symlinks
if isWatched {
stat, err = os.Stat(path)
if err != nil {
// The symlink still exists, but the target is gone. Report the
// Remove similar to above.
if !w.sendEvent(path, Remove) {
return nil
}
// Don't return the error
}
}
if events&unix.FILE_MODIFIED != 0 {
if fmode.IsDir() {
if watchedDir {
if err := w.updateDirectory(path); err != nil {
return err
}
} else {
if !w.sendEvent(path, Write) {
return nil
}
}
} else {
if !w.sendEvent(path, Write) {
return nil
}
}
}
if events&unix.FILE_ATTRIB != 0 && stat != nil {
// Only send Chmod if perms changed
if stat.Mode().Perm() != fmode.Perm() {
if !w.sendEvent(path, Chmod) {
return nil
}
}
}
if stat != nil {
// If we get here, it means we've hit an event above that requires us to
// continue watching the file or directory
return w.associateFile(path, stat, isWatched)
}
return nil
}
func (w *Watcher) updateDirectory(path string) error {
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen,
// as everything else should still be watched.
files, err := os.ReadDir(path)
if err != nil {
return err
}
for _, entry := range files {
path := filepath.Join(path, entry.Name())
if w.port.PathIsWatched(path) {
continue
}
finfo, err := entry.Info()
if err != nil {
return err
}
err = w.associateFile(path, finfo, false)
if err != nil {
if !w.sendError(err) {
return nil
}
}
if !w.sendEvent(path, Create) {
return nil
}
}
return nil
}
func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
if w.isClosed() {
return ErrClosed
}
// This is primarily protecting the call to AssociatePath but it is
// important and intentional that the call to PathIsWatched is also
// protected by this mutex. Without this mutex, AssociatePath has been seen
// to error out that the path is already associated.
w.mu.Lock()
defer w.mu.Unlock()
if w.port.PathIsWatched(path) {
// Remove the old association in favor of this one If we get ENOENT,
// then while the x/sys/unix wrapper still thought that this path was
// associated, the underlying event port did not. This call will have
// cleared up that discrepancy. The most likely cause is that the event
// has fired but we haven't processed it yet.
err := w.port.DissociatePath(path)
if err != nil && err != unix.ENOENT {
return err
}
}
// FILE_NOFOLLOW means we watch symlinks themselves rather than their
// targets.
events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
if follow {
// We *DO* follow symlinks for explicitly watched entries.
events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
}
return w.port.AssociatePath(path, stat,
events,
stat.Mode())
}
func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
if !w.port.PathIsWatched(path) {
return nil
}
return w.port.DissociatePath(path)
}
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches)+len(w.dirs))
for pathname := range w.dirs {
entries = append(entries, pathname)
}
for pathname := range w.watches {
entries = append(entries, pathname)
}
return entries
}

View File

@@ -1,5 +1,8 @@
//go:build linux
// +build linux
//go:build linux && !appengine
// +build linux,!appengine
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify
@@ -26,9 +29,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -42,16 +45,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -67,14 +70,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
// # Windows notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all times, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -101,36 +110,148 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// and on kqueue when a file is truncated. On Windows
// it's never sent.
// when a file is truncated. On Windows it's never
// sent.
Events chan Event
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
// calling Fd(). See: https://github.com/golang/go/issues/26439
fd int
mu sync.Mutex // Map access
inotifyFile *os.File
watches map[string]*watch // Map of inotify watches (key: path)
paths map[int]string // Map of watched paths (key: watch descriptor)
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneResp chan struct{} // Channel to respond to Close
watches *watches
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
closeMu sync.Mutex
doneResp chan struct{} // Channel to respond to Close
}
type (
watches struct {
mu sync.RWMutex
wd map[uint32]*watch // wd → watch
path map[string]uint32 // pathname → wd
}
watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
path string // Watch path.
}
)
func newWatches() *watches {
return &watches{
wd: make(map[uint32]*watch),
path: make(map[string]uint32),
}
}
func (w *watches) len() int {
w.mu.RLock()
defer w.mu.RUnlock()
return len(w.wd)
}
func (w *watches) add(ww *watch) {
w.mu.Lock()
defer w.mu.Unlock()
w.wd[ww.wd] = ww
w.path[ww.path] = ww.wd
}
func (w *watches) remove(wd uint32) {
w.mu.Lock()
defer w.mu.Unlock()
delete(w.path, w.wd[wd].path)
delete(w.wd, wd)
}
func (w *watches) removePath(path string) (uint32, bool) {
w.mu.Lock()
defer w.mu.Unlock()
wd, ok := w.path[path]
if !ok {
return 0, false
}
delete(w.path, path)
delete(w.wd, wd)
return wd, true
}
func (w *watches) byPath(path string) *watch {
w.mu.RLock()
defer w.mu.RUnlock()
return w.wd[w.path[path]]
}
func (w *watches) byWd(wd uint32) *watch {
w.mu.RLock()
defer w.mu.RUnlock()
return w.wd[wd]
}
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
w.mu.Lock()
defer w.mu.Unlock()
var existing *watch
wd, ok := w.path[path]
if ok {
existing = w.wd[wd]
}
upd, err := f(existing)
if err != nil {
return err
}
if upd != nil {
w.wd[upd.wd] = upd
w.path[upd.path] = upd.wd
if upd.wd != wd {
delete(w.wd, wd)
}
}
return nil
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
// Create inotify fd
// Need to set the FD to nonblocking mode in order for SetDeadline methods to work
// Otherwise, blocking i/o operations won't terminate on close
return NewBufferedWatcher(0)
}
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
// I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
if fd == -1 {
return nil, errno
@@ -139,9 +260,8 @@ func NewWatcher() (*Watcher, error) {
w := &Watcher{
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: make(map[string]*watch),
paths: make(map[int]string),
Events: make(chan Event),
watches: newWatches(),
Events: make(chan Event, sz),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
@@ -157,8 +277,8 @@ func (w *Watcher) sendEvent(e Event) bool {
case w.Events <- e:
return true
case <-w.done:
return false
}
return false
}
// Returns true if the error was sent, or false if watcher is closed.
@@ -180,17 +300,15 @@ func (w *Watcher) isClosed() bool {
}
}
// Close removes all watches and closes the events channel.
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
w.closeMu.Lock()
if w.isClosed() {
w.mu.Unlock()
w.closeMu.Unlock()
return nil
}
// Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
w.mu.Unlock()
w.closeMu.Unlock()
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
@@ -207,17 +325,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -227,44 +349,59 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
name = filepath.Clean(name)
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
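// A sketch of the pattern recommended above: watch the parent directory and
// filter on Event.Name. Illustrative only; this is not the upstream
// cmd/fsnotify/file.go example, and the package and helper names are made up.
package watchutil

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

// watchFile reports events for a single file by watching its directory.
func watchFile(w *fsnotify.Watcher, target string) error {
	target = filepath.Clean(target)
	if err := w.Add(filepath.Dir(target)); err != nil {
		return err
	}
	for ev := range w.Events {
		if ev.Name != target {
			continue // some other file in the same directory
		}
		if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
			log.Println("changed:", ev.Name)
		}
	}
	return nil
}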
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return errors.New("inotify instance already closed")
return ErrClosed
}
name = filepath.Clean(name)
_ = getOptions(opts...)
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
w.mu.Lock()
defer w.mu.Unlock()
watchEntry := w.watches[name]
if watchEntry != nil {
flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
if existing != nil {
flags |= existing.flags | unix.IN_MASK_ADD
}
if watchEntry == nil {
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = name
} else {
watchEntry.wd = uint32(wd)
watchEntry.flags = flags
}
wd, err := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return nil, err
}
return nil
if existing == nil {
return &watch{
wd: uint32(wd),
path: name,
flags: flags,
}, nil
}
existing.wd = uint32(wd)
existing.flags = flags
return existing, nil
})
}
// Remove stops monitoring the path for changes.
@@ -273,32 +410,22 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
if w.isClosed() {
return nil
}
return w.remove(filepath.Clean(name))
}
// Fetch the watch.
w.mu.Lock()
defer w.mu.Unlock()
watch, ok := w.watches[name]
// Remove it from inotify.
func (w *Watcher) remove(name string) error {
wd, ok := w.watches.removePath(name)
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
// We successfully removed the watch if InotifyRmWatch doesn't return an
// error; we need to clean up our internal state to ensure it matches
// inotify's kernel state.
delete(w.paths, int(watch.wd))
delete(w.watches, name)
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
// by another thread and we have not received IN_IGNORE event.
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
success, errno := unix.InotifyRmWatch(w.fd, wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case;
// The only two possible errors are:
@@ -312,26 +439,26 @@ func (w *Watcher) Remove(name string) error {
// are watching is deleted.
return errno
}
return nil
}
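// Handling Remove errors from caller code; a small illustrative sketch (the
// package name and path are made up).
package watchutil

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

func unwatch(w *fsnotify.Watcher, path string) {
	switch err := w.Remove(path); {
	case err == nil:
		// stopped watching
	case errors.Is(err, fsnotify.ErrNonExistentWatch):
		// path was never added, or was already removed; often safe to ignore
	default:
		log.Println("remove:", err)
	}
}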
// WatchList returns all paths added with [Add] (and are not yet removed).
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for pathname := range w.watches {
entries = append(entries, pathname)
if w.isClosed() {
return nil
}
return entries
}
entries := make([]string, 0, w.watches.len())
w.watches.mu.RLock()
for pathname := range w.watches.path {
entries = append(entries, pathname)
}
w.watches.mu.RUnlock()
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
return entries
}
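// WatchList only reports paths that were added explicitly, so one way to use
// it is to carry watches over to a fresh Watcher; an illustrative sketch (the
// package name is made up).
package watchutil

import "github.com/fsnotify/fsnotify"

func rewatch(src, dst *fsnotify.Watcher) error {
	for _, p := range src.WatchList() {
		if err := dst.Add(p); err != nil {
			return err
		}
	}
	return nil
}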
// readEvents reads from the inotify file descriptor, converts the
@@ -367,14 +494,11 @@ func (w *Watcher) readEvents() {
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
// If EOF is received. This should really never happen.
err = io.EOF
err = io.EOF // If EOF is received. This should really never happen.
} else if n < 0 {
// If an error occurred while reading.
err = errno
err = errno // If an error occurred while reading.
} else {
// Read was too short.
err = errors.New("notify: short read in readEvents()")
err = errors.New("notify: short read in readEvents()") // Read was too short.
}
if !w.sendError(err) {
return
@@ -403,18 +527,29 @@ func (w *Watcher) readEvents() {
// doesn't append the filename to the event, but we would like to always fill
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
name, ok := w.paths[int(raw.Wd)]
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
// This is a sign to clean up the maps, otherwise we are no longer in sync
// with the inotify kernel state which has already deleted the watch
// automatically.
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
delete(w.paths, int(raw.Wd))
delete(w.watches, name)
}
w.mu.Unlock()
watch := w.watches.byWd(uint32(raw.Wd))
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch.wd)
}
// We can't really update the state when a watched path is moved;
// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
// the watch.
if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
return
}
}
}
var name string
if watch != nil {
name = watch.path
}
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]

View File

@@ -1,12 +1,14 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
@@ -24,9 +26,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -40,16 +42,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -65,14 +67,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
// # Windows notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -99,18 +107,27 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// and on kqueue when a file is truncated. On Windows
// it's never sent.
// when a file is truncated. On Windows it's never
// sent.
Events chan Event
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error
done chan struct{}
@@ -133,6 +150,18 @@ type pathInfo struct {
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
return NewBufferedWatcher(0)
}
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
@@ -147,7 +176,7 @@ func NewWatcher() (*Watcher, error) {
paths: make(map[int]pathInfo),
fileExists: make(map[string]struct{}),
userWatches: make(map[string]struct{}),
Events: make(chan Event),
Events: make(chan Event, sz),
Errors: make(chan error),
done: make(chan struct{}),
}
@@ -197,8 +226,8 @@ func (w *Watcher) sendEvent(e Event) bool {
case w.Events <- e:
return true
case <-w.done:
return false
}
return false
}
// Returns true if the error was sent, or false if watcher is closed.
@@ -207,11 +236,11 @@ func (w *Watcher) sendError(err error) bool {
case w.Errors <- err:
return true
case <-w.done:
return false
}
return false
}
// Close removes all watches and closes the events channel.
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
@@ -239,17 +268,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -259,15 +292,28 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
_ = getOptions(opts...)
w.mu.Lock()
w.userWatches[name] = struct{}{}
w.mu.Unlock()
@@ -281,9 +327,19 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
return w.remove(name, true)
}
func (w *Watcher) remove(name string, unwatchFiles bool) error {
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
@@ -315,7 +371,7 @@ func (w *Watcher) Remove(name string) error {
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if isDir {
if unwatchFiles && isDir {
var pathsToRemove []string
w.mu.Lock()
for fd := range w.watchesByDir[name] {
@@ -326,20 +382,25 @@ func (w *Watcher) Remove(name string) error {
}
w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error
// to the user, as that will just confuse them with an error about
// a path they did not explicitly watch themselves.
// Since these are internal, not much sense in propagating error to
// the user, as that will just confuse them with an error about a
// path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
// WatchList returns all paths added with [Add] (and are not yet removed).
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
w.mu.Lock()
defer w.mu.Unlock()
if w.isClosed {
return nil
}
entries := make([]string, 0, len(w.userWatches))
for pathname := range w.userWatches {
@@ -352,18 +413,18 @@ func (w *Watcher) WatchList() []string {
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
// addWatch adds name to the watched file set; the flags are interpreted as
// described in kevent(2).
//
// Returns the real path to the file which was added, with symlinks resolved.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
// Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return "", errors.New("kevent instance already closed")
return "", ErrClosed
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
@@ -383,27 +444,30 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
return "", nil
}
// Follow Symlinks
//
// Linux can add unresolvable symlinks to the watch list without issue,
// and Windows can't do symlinks period. To maintain consistency, we
// will act like everything is fine if the link can't be resolved.
// There will simply be no file events for broken symlinks. Hence the
// returns of nil on errors.
// Follow Symlinks.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
name, err = filepath.EvalSymlinks(name)
link, err := os.Readlink(name)
if err != nil {
// Return nil because Linux can add unresolvable symlinks to the
// watch list without problems, so maintain consistency with
// that. There will be no file events for broken symlinks.
// TODO: more specific check; returns os.PathError; ENOENT?
return "", nil
}
w.mu.Lock()
_, alreadyWatching = w.watches[name]
_, alreadyWatching = w.watches[link]
w.mu.Unlock()
if alreadyWatching {
return name, nil
// Add to watches so we don't get spurious Create events later
// on when we diff the directories.
w.watches[name] = 0
w.fileExists[name] = struct{}{}
return link, nil
}
name = link
fi, err = os.Lstat(name)
if err != nil {
return "", nil
@@ -411,7 +475,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
}
// Retry on EINTR; open() can return EINTR in practice on macOS.
// See #354, and go issues 11180 and 39237.
// See #354, and Go issues 11180 and 39237.
for {
watchfd, err = unix.Open(name, openMode, 0)
if err == nil {
@@ -444,14 +508,13 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
w.watchesByDir[parentName] = watchesByDir
}
watchesByDir[watchfd] = struct{}{}
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
// Watch the directory if it has not been watched before,
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
// Watch the directory if it has not been watched before, or if it was
// watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
@@ -473,13 +536,10 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
defer func() {
err := unix.Close(w.kq)
if err != nil {
w.Errors <- err
}
unix.Close(w.closepipe[0])
close(w.Events)
close(w.Errors)
_ = unix.Close(w.kq)
unix.Close(w.closepipe[0])
}()
eventBuffer := make([]unix.Kevent_t, 10)
@@ -513,18 +573,8 @@ func (w *Watcher) readEvents() {
event := w.newEvent(path.name, mask)
if path.isDir && !event.Has(Remove) {
// Double check to make sure the directory exists. This can
// happen when we do an rm -fr on recursively watched folders
// and we receive a modification event first but the folder has
// been deleted and later receive the delete event.
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
event.Op |= Remove
}
}
if event.Has(Rename) || event.Has(Remove) {
w.Remove(event.Name)
w.remove(event.Name, false)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
@@ -540,26 +590,30 @@ func (w *Watcher) readEvents() {
}
if event.Has(Remove) {
// Look for a file that may have overwritten this.
// For example, mv f1 f2 will delete f2, then create f2.
// Look for a file that may have overwritten this; for example,
// mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
// make sure the directory exists before we watch for changes. When we
// do a recursive watch and perform rm -fr, the parent directory might
// have gone missing, ignore the missing directory and let the
// upcoming delete event remove the watch from the parent directory.
if _, err := os.Lstat(fileDir); err == nil {
w.sendDirectoryChangeEvents(fileDir)
err := w.sendDirectoryChangeEvents(fileDir)
if err != nil {
if !w.sendError(err) {
closed = true
}
}
}
} else {
filePath := filepath.Clean(event.Name)
if fileInfo, err := os.Lstat(filePath); err == nil {
w.sendFileCreatedEventIfNew(filePath, fileInfo)
if fi, err := os.Lstat(filePath); err == nil {
err := w.sendFileCreatedEventIfNew(filePath, fi)
if err != nil {
if !w.sendError(err) {
closed = true
}
}
}
}
}
@@ -582,21 +636,31 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
// No point sending a write and delete event at the same time: if it's gone,
// then it's gone.
if e.Op.Has(Write) && e.Op.Has(Remove) {
e.Op &^= Write
}
return e
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := ioutil.ReadDir(dirPath)
files, err := os.ReadDir(dirPath)
if err != nil {
return err
}
for _, fileInfo := range files {
path := filepath.Join(dirPath, fileInfo.Name())
for _, f := range files {
path := filepath.Join(dirPath, f.Name())
cleanPath, err := w.internalWatch(path, fileInfo)
fi, err := f.Info()
if err != nil {
return fmt.Errorf("%q: %w", path, err)
}
cleanPath, err := w.internalWatch(path, fi)
if err != nil {
// No permission to read the file; that's not a problem: just skip.
// But do add it to w.fileExists to prevent it from being picked up
@@ -606,7 +670,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
cleanPath = filepath.Clean(path)
default:
return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
return fmt.Errorf("%q: %w", path, err)
}
}
@@ -622,26 +686,37 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
//
// This functionality is to have the BSD watcher match the inotify, which sends
// a create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dir string) {
// Get all files
files, err := ioutil.ReadDir(dir)
func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
files, err := os.ReadDir(dir)
if err != nil {
if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
return
// Directory no longer exists: we can ignore this safely. kqueue will
// still give us the correct events.
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
// Search for new files
for _, fi := range files {
err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
for _, f := range files {
fi, err := f.Info()
if err != nil {
return
return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
// Don't need to send an error if this file isn't readable.
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
return nil
}
return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
}
}
return nil
}
// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
@@ -652,7 +727,7 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
}
// like watchDirectoryFiles (but without doing another ReadDir)
filePath, err = w.internalWatch(filePath, fileInfo)
filePath, err = w.internalWatch(filePath, fi)
if err != nil {
return err
}
@@ -664,10 +739,10 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
return nil
}
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
if fileInfo.IsDir() {
// mimic Linux providing delete events for subdirectories
// but preserve the flags used if currently watching subdirectory
func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
// mimic Linux providing delete events for subdirectories, but preserve
// the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()

View File

@@ -1,39 +1,169 @@
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify
import (
"fmt"
"runtime"
)
import "errors"
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct{}
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
// fsnotify can send the following events; a "path" here can refer to a
// file, directory, symbolic link, or special file like a FIFO.
//
// fsnotify.Create A new path was created; this may be followed by one
// or more Write events if data also gets written to a
// file.
//
// fsnotify.Remove A path was removed.
//
// fsnotify.Rename A path was renamed. A rename is always sent with the
// old path as Event.Name, and a Create event will be
// sent with the new name. Renames are only sent for
// paths that are currently watched; e.g. moving an
// unmonitored file into a monitored directory will
// show up as just a Create. Similarly, renaming a file
// to outside a monitored directory will show up as
// only a Rename.
//
// fsnotify.Write A file or named pipe was written to. A Truncate will
// also trigger a Write. A single "write action"
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows it's never
// sent.
Events chan Event
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error
}
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
return nil, errors.New("fsnotify not supported on the current platform")
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error { return nil }
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { return nil }
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -43,17 +173,26 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
return nil
}
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return nil }
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
// Remove stops monitoring the path for changes.
//
@@ -61,6 +200,6 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error {
return nil
}
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error { return nil }

View File

@@ -1,6 +1,13 @@
//go:build windows
// +build windows
// Windows backend based on ReadDirectoryChangesW()
//
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
//
// Note: the documentation on the Watcher type and methods is generated from
// mkdoc.zsh
package fsnotify
import (
@@ -27,9 +34,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -43,16 +50,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -68,14 +75,20 @@ import (
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
// # Windows notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
// Paths can be added as "C:\path\to\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
// Events sends the filesystem change events.
//
@@ -102,31 +115,52 @@ type Watcher struct {
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// and on kqueue when a file is truncated. On Windows
// it's never sent.
// when a file is truncated. On Windows it's never
// sent.
Events chan Event
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
Errors chan error
port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
mu sync.Mutex // Protects access to watches, isClosed
watches watchMap // Map of watches (key: i-number)
isClosed bool // Set to true when Close() is first called
mu sync.Mutex // Protects access to watches, closed
watches watchMap // Map of watches (key: i-number)
closed bool // Set to true when Close() is first called
}
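// A minimal debounce sketch for the "hundreds of Write events" case described
// in the Events documentation above. This is not the cmd/fsnotify dedup
// example; the 100ms settle delay and package name are arbitrary.
package watchutil

import (
	"log"
	"sync"
	"time"

	"github.com/fsnotify/fsnotify"
)

func debounceWrites(w *fsnotify.Watcher) {
	var (
		mu     sync.Mutex
		timers = map[string]*time.Timer{}
	)
	for ev := range w.Events {
		if !ev.Has(fsnotify.Write) {
			continue
		}
		mu.Lock()
		if t, ok := timers[ev.Name]; ok {
			t.Reset(100 * time.Millisecond) // still being written; wait longer
			mu.Unlock()
			continue
		}
		name := ev.Name
		timers[name] = time.AfterFunc(100*time.Millisecond, func() {
			mu.Lock()
			delete(timers, name)
			mu.Unlock()
			log.Println("write settled:", name)
		})
		mu.Unlock()
	}
}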
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
return NewBufferedWatcher(50)
}
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
@@ -135,7 +169,7 @@ func NewWatcher() (*Watcher, error) {
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
Events: make(chan Event, 50),
Events: make(chan Event, sz),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
@@ -143,6 +177,12 @@ func NewWatcher() (*Watcher, error) {
return w, nil
}
func (w *Watcher) isClosed() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.closed
}
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
@@ -167,14 +207,14 @@ func (w *Watcher) sendError(err error) bool {
return false
}
// Close removes all watches and closes the events channel.
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
if w.isClosed() {
return nil
}
w.isClosed = true
w.mu.Lock()
w.closed = true
w.mu.Unlock()
// Send "quit" message to the reader goroutine
@@ -188,17 +228,21 @@ func (w *Watcher) Close() error {
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -208,27 +252,41 @@ func (w *Watcher) Close() error {
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return errors.New("watcher already closed")
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(name string) error { return w.AddWith(name) }
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
with := getOptions(opts...)
if with.bufsize < 4096 {
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
}
w.mu.Unlock()
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
bufsize: with.bufsize,
}
w.input <- in
if err := w.wakeupReader(); err != nil {
@@ -243,7 +301,13 @@ func (w *Watcher) Add(name string) error {
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(name string) error {
if w.isClosed() {
return nil
}
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
@@ -256,8 +320,15 @@ func (w *Watcher) Remove(name string) error {
return <-in.reply
}
// WatchList returns all paths added with [Add] (and are not yet removed).
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
@@ -279,7 +350,6 @@ func (w *Watcher) WatchList() []string {
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
const (
sysFSALLEVENTS = 0xfff
sysFSATTRIB = 0x4
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
@@ -305,9 +375,6 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
if mask&sysFSATTRIB == sysFSATTRIB {
e.Op |= Chmod
}
return e
}
@@ -321,10 +388,11 @@ const (
)
type input struct {
op int
path string
flags uint32
reply chan error
op int
path string
flags uint32
bufsize int
reply chan error
}
type inode struct {
@@ -334,13 +402,14 @@ type inode struct {
}
type watch struct {
ov windows.Overlapped
ino *inode // i-number
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf [65536]byte // 64K buffer
ov windows.Overlapped
ino *inode // i-number
recurse bool // Recursive watch?
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf []byte // buffer, allocated later
}
type (
@@ -413,7 +482,10 @@ func (m watchMap) set(ino *inode, watch *watch) {
}
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
//pathname, recurse := recursivePath(pathname)
recurse := false
dir, err := w.getDir(pathname)
if err != nil {
return err
@@ -433,9 +505,11 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
return os.NewSyscallError("CreateIoCompletionPort", err)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
ino: ino,
path: dir,
names: make(map[string]uint64),
recurse: recurse,
buf: make([]byte, bufsize),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
@@ -465,6 +539,8 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
if err != nil {
return err
@@ -478,6 +554,10 @@ func (w *Watcher) remWatch(pathname string) error {
watch := w.watches.get(ino)
w.mu.Unlock()
if recurse && !watch.recurse {
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
}
err = windows.CloseHandle(ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err))
@@ -535,8 +615,11 @@ func (w *Watcher) startRead(watch *watch) error {
return nil
}
rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
// We need to pass the array, rather than the slice.
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
watch.recurse, mask, nil, &watch.ov, 0)
if rdErr != nil {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
@@ -563,9 +646,8 @@ func (w *Watcher) readEvents() {
runtime.LockOSThread()
for {
// This error is handled after the watch == nil check below.
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
// This error is handled after the watch == nil check below. NOTE: this
// seems odd, not sure if it's correct.
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
@@ -595,7 +677,7 @@ func (w *Watcher) readEvents() {
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags))
in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
@@ -605,6 +687,8 @@ func (w *Watcher) readEvents() {
}
switch qErr {
case nil:
// No error
case windows.ERROR_MORE_DATA:
if watch == nil {
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
@@ -626,13 +710,12 @@ func (w *Watcher) readEvents() {
default:
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
continue
case nil:
}
var offset uint32
for {
if n == 0 {
w.sendError(errors.New("short read in readEvents()"))
w.sendError(ErrEventOverflow)
break
}
@@ -703,8 +786,9 @@ func (w *Watcher) readEvents() {
// Error!
if offset >= n {
//lint:ignore ST1005 Windows should be capitalized
w.sendError(errors.New(
"Windows system assumed buffer larger than it is, events have likely been missed."))
"Windows system assumed buffer larger than it is, events have likely been missed"))
break
}
}
@@ -720,9 +804,6 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&sysFSATTRIB != 0 {
m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
}

View File

@@ -1,13 +1,18 @@
//go:build !plan9
// +build !plan9
// Package fsnotify provides a cross-platform interface for file system
// notifications.
//
// Currently supported systems:
//
// Linux 2.6.32+ via inotify
// BSD, macOS via kqueue
// Windows via ReadDirectoryChangesW
// illumos via FEN
package fsnotify
import (
"errors"
"fmt"
"path/filepath"
"strings"
)
@@ -33,34 +38,52 @@ type Op uint32
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
// full description, and check them with [Event.Has].
const (
// A new pathname was created.
Create Op = 1 << iota
// The pathname was written to; this does *not* mean the write has finished,
// and a write can be followed by more writes.
Write
// The path was removed; any watches on it will be removed. Some "remove"
// operations may trigger a Rename if the file is actually moved (for
// example "remove to trash" is often a rename).
Remove
// The path was renamed to something else; any watched on it will be
// removed.
Rename
// File attributes were changed.
//
// It's generally not recommended to take action on this event, as it may
// get triggered very frequently by some software. For example, Spotlight
// indexing on macOS, anti-virus software, backup software, etc.
Chmod
)
// Common errors that can be reported by a watcher
// Common errors that can be reported.
var (
ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
ErrEventOverflow = errors.New("fsnotify queue overflow")
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
ErrClosed = errors.New("fsnotify: watcher already closed")
)
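// Draining the Errors channel and treating overflow specially; the rescan
// callback is an application-level choice, not part of fsnotify. Illustrative
// sketch only (the package name is made up).
package watchutil

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

func drainErrors(w *fsnotify.Watcher, rescan func()) {
	for err := range w.Errors {
		if errors.Is(err, fsnotify.ErrEventOverflow) {
			// Events were dropped (inotify queue overflow, or the Windows
			// buffer was too small), so in-memory state may be stale.
			rescan()
			continue
		}
		log.Println("watch error:", err)
	}
}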
func (op Op) String() string {
func (o Op) String() string {
var b strings.Builder
if op.Has(Create) {
if o.Has(Create) {
b.WriteString("|CREATE")
}
if op.Has(Remove) {
if o.Has(Remove) {
b.WriteString("|REMOVE")
}
if op.Has(Write) {
if o.Has(Write) {
b.WriteString("|WRITE")
}
if op.Has(Rename) {
if o.Has(Rename) {
b.WriteString("|RENAME")
}
if op.Has(Chmod) {
if o.Has(Chmod) {
b.WriteString("|CHMOD")
}
if b.Len() == 0 {
@@ -70,7 +93,7 @@ func (op Op) String() string {
}
// Has reports if this operation has the given operation.
func (o Op) Has(h Op) bool { return o&h == h }
func (o Op) Has(h Op) bool { return o&h != 0 }
// Has reports if this event has the given operation.
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
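// Dispatching on an event's operations with Has; an event can carry more than
// one operation, so the order of the checks is a policy decision. Illustrative
// sketch only (the package name is made up).
package watchutil

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func handle(ev fsnotify.Event) {
	switch {
	case ev.Has(fsnotify.Remove), ev.Has(fsnotify.Rename):
		log.Println("gone:", ev.Name)
	case ev.Has(fsnotify.Create):
		log.Println("created:", ev.Name)
	case ev.Has(fsnotify.Write):
		log.Println("written:", ev.Name)
	case ev.Has(fsnotify.Chmod):
		// Often noisy (indexing, anti-virus, backup software); commonly ignored.
	}
}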
@@ -79,3 +102,45 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
func (e Event) String() string {
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}
type (
addOpt func(opt *withOpts)
withOpts struct {
bufsize int
}
)
var defaultOpts = withOpts{
bufsize: 65536, // 64K
}
func getOptions(opts ...addOpt) withOpts {
with := defaultOpts
for _, o := range opts {
o(&with)
}
return with
}
// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
//
// This only has effect on Windows systems, and is a no-op for other backends.
//
// The default value is 64K (65536 bytes) which is the highest value that works
// on all filesystems and should be enough for most applications, but if you
// have a large burst of events it may not be enough. You can increase it if
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
//
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
func WithBufferSize(bytes int) addOpt {
return func(opt *withOpts) { opt.bufsize = bytes }
}
// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
func recursivePath(path string) (string, bool) {
if filepath.Base(path) == "..." {
return filepath.Dir(path), true
}
return path, false
}
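
For context on the AddWith/WithBufferSize API introduced by this fsnotify bump, here is a minimal usage sketch; the directory path and buffer size are illustrative, not taken from the diff:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Raise the ReadDirectoryChangesW buffer for a bursty directory;
	// WithBufferSize is a no-op on non-Windows backends.
	if err := w.AddWith(`C:\some\busy\dir`, fsnotify.WithBufferSize(256*1024)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-w.Events:
			log.Println("event:", ev)
		case err := <-w.Errors:
			log.Println("error:", err)
		}
	}
}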

View File

@@ -2,8 +2,8 @@
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
setopt err_exit no_unset pipefail extended_glob
# Simple script to update the godoc comments on all watchers. Probably took me
# more time to write this than doing it manually, but ah well 🙃
# Simple script to update the godoc comments on all watchers so you don't need
# to update the same comment 5 times.
watcher=$(<<EOF
// Watcher watches a set of paths, delivering events on a channel.
@@ -16,9 +16,9 @@ watcher=$(<<EOF
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@@ -32,16 +32,16 @@ watcher=$(<<EOF
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
@@ -57,14 +57,20 @@ watcher=$(<<EOF
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # macOS notes
// # Windows notes
//
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
// Settings" until we have a native FSEvents implementation (see [#11]).
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
EOF
)
@@ -73,20 +79,36 @@ new=$(<<EOF
EOF
)
newbuffered=$(<<EOF
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
EOF
)
add=$(<<EOF
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
@@ -96,14 +118,27 @@ add=$(<<EOF
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
EOF
)
addwith=$(<<EOF
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
EOF
)
@@ -114,16 +149,21 @@ remove=$(<<EOF
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
EOF
)
close=$(<<EOF
// Close removes all watches and closes the events channel.
// Close removes all watches and closes the Events channel.
EOF
)
watchlist=$(<<EOF
// WatchList returns all paths added with [Add] (and are not yet removed).
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
EOF
)
@@ -153,20 +193,29 @@ events=$(<<EOF
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, so you
// probably want to wait until you've stopped receiving
// them (see the dedup example in cmd/fsnotify).
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// and on kqueue when a file is truncated. On Windows
// it's never sent.
// when a file is truncated. On Windows it's never
// sent.
EOF
)
errors=$(<<EOF
// Errors sends any errors.
//
// ErrEventOverflow is used to indicate there are too many events:
//
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
EOF
)
@@ -200,7 +249,9 @@ set-cmt() {
set-cmt '^type Watcher struct ' $watcher
set-cmt '^func NewWatcher(' $new
set-cmt '^func NewBufferedWatcher(' $newbuffered
set-cmt '^func (w \*Watcher) Add(' $add
set-cmt '^func (w \*Watcher) AddWith(' $addwith
set-cmt '^func (w \*Watcher) Remove(' $remove
set-cmt '^func (w \*Watcher) Close(' $close
set-cmt '^func (w \*Watcher) WatchList(' $watchlist
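
The Add() documentation above recommends watching the parent directory and filtering on Event.Name instead of watching a single file; a small sketch of that pattern follows (directory and file names are illustrative):

package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the directory, not the file: many editors replace files atomically
	// (write a temp file, then rename it over the original), which loses a watch
	// placed on the file itself.
	if err := w.Add("/etc/myapp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			if filepath.Base(ev.Name) == "config.yaml" && ev.Has(fsnotify.Write) {
				log.Println("config changed:", ev)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}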

View File

@@ -15,6 +15,7 @@ go_library(
"macro.go",
"options.go",
"program.go",
"validator.go",
],
importpath = "github.com/google/cel-go/cel",
visibility = ["//visibility:public"],
@@ -22,15 +23,18 @@ go_library(
"//checker:go_default_library",
"//checker/decls:go_default_library",
"//common:go_default_library",
"//common/ast:go_default_library",
"//common/containers:go_default_library",
"//common/decls:go_default_library",
"//common/functions:go_default_library",
"//common/operators:go_default_library",
"//common/overloads:go_default_library",
"//common/stdlib:go_default_library",
"//common/types:go_default_library",
"//common/types/pb:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"//interpreter:go_default_library",
"//interpreter/functions:go_default_library",
"//parser:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
@@ -72,6 +76,8 @@ go_test(
"@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//encoding/prototext:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
"@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
],
)

File diff suppressed because it is too large

View File

@@ -16,13 +16,14 @@ package cel
import (
"errors"
"fmt"
"sync"
"github.com/google/cel-go/checker"
"github.com/google/cel-go/checker/decls"
chkdecls "github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common"
celast "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
@@ -40,8 +41,8 @@ type Ast struct {
expr *exprpb.Expr
info *exprpb.SourceInfo
source Source
refMap map[int64]*exprpb.Reference
typeMap map[int64]*exprpb.Type
refMap map[int64]*celast.ReferenceInfo
typeMap map[int64]*types.Type
}
// Expr returns the proto serializable instance of the parsed/checked expression.
@@ -60,21 +61,26 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
}
// ResultType returns the output type of the expression if the Ast has been type-checked, else
// returns decls.Dyn as the parse step cannot infer the type.
// returns chkdecls.Dyn as the parse step cannot infer the type.
//
// Deprecated: use OutputType
func (ast *Ast) ResultType() *exprpb.Type {
if !ast.IsChecked() {
return decls.Dyn
return chkdecls.Dyn
}
return ast.typeMap[ast.expr.GetId()]
out := ast.OutputType()
t, err := TypeToExprType(out)
if err != nil {
return chkdecls.Dyn
}
return t
}
// OutputType returns the output type of the expression if the Ast has been type-checked, else
// returns cel.DynType as the parse step cannot infer types.
func (ast *Ast) OutputType() *Type {
t, err := ExprTypeToType(ast.ResultType())
if err != nil {
t, found := ast.typeMap[ast.expr.GetId()]
if !found {
return DynType
}
return t
@@ -87,22 +93,33 @@ func (ast *Ast) Source() Source {
}
// FormatType converts a type message into a string representation.
//
// Deprecated: prefer FormatCELType
func FormatType(t *exprpb.Type) string {
return checker.FormatCheckedType(t)
}
// FormatCELType formats a cel.Type value to a string representation.
//
// The type formatting is identical to FormatType.
func FormatCELType(t *Type) string {
return checker.FormatCELType(t)
}
// Env encapsulates the context necessary to perform parsing, type checking, or generation of
// evaluable programs for different expressions.
type Env struct {
Container *containers.Container
functions map[string]*functionDecl
declarations []*exprpb.Decl
variables []*decls.VariableDecl
functions map[string]*decls.FunctionDecl
macros []parser.Macro
adapter ref.TypeAdapter
provider ref.TypeProvider
adapter types.Adapter
provider types.Provider
features map[int]bool
appliedFeatures map[int]bool
libraries map[string]bool
validators []ASTValidator
costOptions []checker.CostOption
// Internal parser representation
prsr *parser.Parser
@@ -154,8 +171,8 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
return nil, err
}
return (&Env{
declarations: []*exprpb.Decl{},
functions: map[string]*functionDecl{},
variables: []*decls.VariableDecl{},
functions: map[string]*decls.FunctionDecl{},
macros: []parser.Macro{},
Container: containers.DefaultContainer,
adapter: registry,
@@ -163,14 +180,20 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
features: map[int]bool{},
appliedFeatures: map[int]bool{},
libraries: map[string]bool{},
validators: []ASTValidator{},
progOpts: []ProgramOption{},
costOptions: []checker.CostOption{},
}).configure(opts)
}
// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
// If any `ASTValidators` are configured on the environment, they will be applied after a valid
// type-check result. If any issues are detected, the validators will provide them on the
// output Issues object.
//
// Checking has failed if the returned Issues value and its Issues.Err() value are non-nil.
// Issues should be inspected if they are non-nil, but may not represent a fatal error.
// Either checking or validation has failed if the returned Issues value and its Issues.Err()
// value are non-nil. Issues should be inspected if they are non-nil, but may not represent a
// fatal error.
//
// It is possible to have both non-nil Ast and Issues values returned from this call: however,
// the mere presence of an Ast does not imply that it is valid for use.
@@ -183,21 +206,38 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
if err != nil {
errs := common.NewErrors(ast.Source())
errs.ReportError(common.NoLocation, err.Error())
return nil, NewIssues(errs)
return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
}
res, errs := checker.Check(pe, ast.Source(), chk)
if len(errs.GetErrors()) > 0 {
return nil, NewIssues(errs)
return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo())
}
// Manually create the Ast to ensure that the Ast source information (which may be more
// detailed than the information provided by Check), is returned to the caller.
return &Ast{
ast = &Ast{
source: ast.Source(),
expr: res.GetExpr(),
info: res.GetSourceInfo(),
refMap: res.GetReferenceMap(),
typeMap: res.GetTypeMap()}, nil
expr: res.Expr,
info: res.SourceInfo,
refMap: res.ReferenceMap,
typeMap: res.TypeMap}
// Generate a validator configuration from the set of configured validators.
vConfig := newValidatorConfig()
for _, v := range e.validators {
if cv, ok := v.(ASTValidatorConfigurer); ok {
cv.Configure(vConfig)
}
}
// Apply additional validators on the type-checked result.
iss := NewIssuesWithSourceInfo(errs, ast.SourceInfo())
for _, v := range e.validators {
v.Validate(e, vConfig, res, iss)
}
if iss.Err() != nil {
return nil, iss
}
return ast, nil
}
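
A minimal sketch of the Compile/Issues/Program flow described in the comments above, assuming the cel-go API at the version vendored here; the variable name and expression are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
	if err != nil {
		log.Fatal(err)
	}
	ast, iss := env.Compile(`"hello " + name`)
	if iss.Err() != nil {
		// Parse, type-check, and any configured ASTValidator issues surface here.
		log.Fatal(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		log.Fatal(err)
	}
	out, _, err := prg.Eval(map[string]any{"name": "world"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // hello world
}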
// Compile combines the Parse and Check phases of CEL program compilation to produce an Ast and
@@ -255,7 +295,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
copy(chkOptsCopy, e.chkOpts)
// Copy the declarations if needed.
decsCopy := []*exprpb.Decl{}
varsCopy := []*decls.VariableDecl{}
if chk != nil {
// If the type-checker has already been instantiated, then the e.declarations have been
// validated within the chk instance.
@@ -263,8 +303,8 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
} else {
// If the type-checker has not been instantiated, ensure the unvalidated declarations are
// provided to the extended Env instance.
decsCopy = make([]*exprpb.Decl, len(e.declarations))
copy(decsCopy, e.declarations)
varsCopy = make([]*decls.VariableDecl, len(e.variables))
copy(varsCopy, e.variables)
}
// Copy macros and program options
@@ -276,8 +316,8 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
// Copy the adapter / provider if they appear to be mutable.
adapter := e.adapter
provider := e.provider
adapterReg, isAdapterReg := e.adapter.(ref.TypeRegistry)
providerReg, isProviderReg := e.provider.(ref.TypeRegistry)
adapterReg, isAdapterReg := e.adapter.(*types.Registry)
providerReg, isProviderReg := e.provider.(*types.Registry)
// In most cases the provider and adapter will be a ref.TypeRegistry;
// however, in the rare cases where they are not, they are assumed to
// be immutable. Since it is possible to set the TypeProvider separately
@@ -308,7 +348,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
for k, v := range e.appliedFeatures {
appliedFeaturesCopy[k] = v
}
funcsCopy := make(map[string]*functionDecl, len(e.functions))
funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions))
for k, v := range e.functions {
funcsCopy[k] = v
}
@@ -316,10 +356,14 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
for k, v := range e.libraries {
libsCopy[k] = v
}
validatorsCopy := make([]ASTValidator, len(e.validators))
copy(validatorsCopy, e.validators)
costOptsCopy := make([]checker.CostOption, len(e.costOptions))
copy(costOptsCopy, e.costOptions)
ext := &Env{
Container: e.Container,
declarations: decsCopy,
variables: varsCopy,
functions: funcsCopy,
macros: macsCopy,
progOpts: progOptsCopy,
@@ -327,9 +371,11 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
features: featuresCopy,
appliedFeatures: appliedFeaturesCopy,
libraries: libsCopy,
validators: validatorsCopy,
provider: provider,
chkOpts: chkOptsCopy,
prsrOpts: prsrOptsCopy,
costOptions: costOptsCopy,
}
return ext.configure(opts)
}
@@ -347,6 +393,25 @@ func (e *Env) HasLibrary(libName string) bool {
return exists && configured
}
// Libraries returns a list of SingletonLibrary that have been configured in the environment.
func (e *Env) Libraries() []string {
libraries := make([]string, 0, len(e.libraries))
for libName := range e.libraries {
libraries = append(libraries, libName)
}
return libraries
}
// HasValidator returns whether a specific ASTValidator has been configured in the environment.
func (e *Env) HasValidator(name string) bool {
for _, v := range e.validators {
if v.Name() == name {
return true
}
}
return false
}
// Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
//
// This form of Parse creates a Source value for the input `txt` and forwards to the
@@ -388,36 +453,64 @@ func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
return newProgram(e, ast, optSet)
}
// CELTypeAdapter returns the `types.Adapter` configured for the environment.
func (e *Env) CELTypeAdapter() types.Adapter {
return e.adapter
}
// CELTypeProvider returns the `types.Provider` configured for the environment.
func (e *Env) CELTypeProvider() types.Provider {
return e.provider
}
// TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
//
// Deprecated: use CELTypeAdapter()
func (e *Env) TypeAdapter() ref.TypeAdapter {
return e.adapter
}
// TypeProvider returns the `ref.TypeProvider` configured for the environment.
//
// Deprecated: use CELTypeProvider()
func (e *Env) TypeProvider() ref.TypeProvider {
return e.provider
if legacyProvider, ok := e.provider.(ref.TypeProvider); ok {
return legacyProvider
}
return &interopLegacyTypeProvider{Provider: e.provider}
}
// UnknownVars returns an interpreter.PartialActivation which marks all variables
// declared in the Env as unknown AttributePattern values.
// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the
// Env as unknown AttributePattern values.
//
// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation
// unless the PartialAttributes option is provided as a ProgramOption.
// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the
// PartialAttributes option is provided as a ProgramOption.
func (e *Env) UnknownVars() interpreter.PartialActivation {
var unknownPatterns []*interpreter.AttributePattern
for _, d := range e.declarations {
switch d.GetDeclKind().(type) {
case *exprpb.Decl_Ident:
unknownPatterns = append(unknownPatterns,
interpreter.NewAttributePattern(d.GetName()))
}
}
part, _ := PartialVars(
interpreter.EmptyActivation(),
unknownPatterns...)
act := interpreter.EmptyActivation()
part, _ := PartialVars(act, e.computeUnknownVars(act)...)
return part
}
// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable
// set, but which have been configured in the environment, are marked as unknown.
//
// The `vars` value may either be an interpreter.Activation or any valid input to the
// interpreter.NewActivation call.
//
// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown
// variables. For more advanced use cases of partial state where portions of an object graph, rather
// than top-level variables, are missing the PartialVars() method may be a more suitable choice.
//
// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the
// PartialAttributes option is provided as a ProgramOption.
func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
act, err := interpreter.NewActivation(vars)
if err != nil {
return nil, err
}
return PartialVars(act, e.computeUnknownVars(act)...)
}
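
A hedged sketch of how the new Env.PartialVars helper combines with partial evaluation and ResidualAst; the expression and eval options are illustrative, not prescribed by the diff:

package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("a", cel.IntType),
		cel.Variable("b", cel.IntType),
	)
	if err != nil {
		log.Fatal(err)
	}
	ast, iss := env.Compile(`a + b > 10`)
	if iss.Err() != nil {
		log.Fatal(iss.Err())
	}
	// Partial evaluation needs state tracking and partial-attribute support.
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptTrackState, cel.OptPartialEval))
	if err != nil {
		log.Fatal(err)
	}
	// Only `a` is provided; PartialVars marks every other declared variable unknown.
	vars, err := env.PartialVars(map[string]any{"a": 5})
	if err != nil {
		log.Fatal(err)
	}
	_, det, _ := prg.Eval(vars) // result is an unknown value since `b` is missing
	residual, err := env.ResidualAst(ast, det)
	if err != nil {
		log.Fatal(err)
	}
	src, _ := cel.AstToString(residual)
	fmt.Println(src) // roughly: 5 + b > 10
}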
// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
// attribute references which are unknown.
//
@@ -463,11 +556,16 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
// extension functions provided by estimator.
func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
checked, err := AstToCheckedExpr(ast)
if err != nil {
return checker.CostEstimate{}, fmt.Errorf("EsimateCost could not inspect Ast: %v", err)
checked := &celast.CheckedAST{
Expr: ast.Expr(),
SourceInfo: ast.SourceInfo(),
TypeMap: ast.typeMap,
ReferenceMap: ast.refMap,
}
return checker.Cost(checked, estimator, opts...)
extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
extendedOpts = append(extendedOpts, opts...)
extendedOpts = append(extendedOpts, e.costOptions...)
return checker.Cost(checked, estimator, extendedOpts...)
}
// configure applies a series of EnvOptions to the current environment.
@@ -488,14 +586,6 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
return nil, err
}
// Initialize all of the functions configured within the environment.
for _, fn := range e.functions {
err = fn.init()
if err != nil {
return nil, err
}
}
// Configure the parser.
prsrOpts := []parser.Option{}
prsrOpts = append(prsrOpts, e.prsrOpts...)
@@ -504,6 +594,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
if e.HasFeature(featureEnableMacroCallTracking) {
prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
}
if e.HasFeature(featureVariadicLogicalASTs) {
prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true))
}
e.prsr, err = parser.NewParser(prsrOpts...)
if err != nil {
return nil, err
@@ -525,8 +618,6 @@ func (e *Env) initChecker() (*checker.Env, error) {
chkOpts := []checker.Option{}
chkOpts = append(chkOpts, e.chkOpts...)
chkOpts = append(chkOpts,
checker.HomogeneousAggregateLiterals(
e.HasFeature(featureDisableDynamicAggregateLiterals)),
checker.CrossTypeNumericComparisons(
e.HasFeature(featureCrossTypeNumericComparisons)))
@@ -536,19 +627,17 @@ func (e *Env) initChecker() (*checker.Env, error) {
return
}
// Add the statically configured declarations.
err = ce.Add(e.declarations...)
err = ce.AddIdents(e.variables...)
if err != nil {
e.setCheckerOrError(nil, err)
return
}
// Add the function declarations which are derived from the FunctionDecl instances.
for _, fn := range e.functions {
fnDecl, err := functionDeclToExprDecl(fn)
if err != nil {
e.setCheckerOrError(nil, err)
return
if fn.IsDeclarationDisabled() {
continue
}
err = ce.Add(fnDecl)
err = ce.AddFunctions(fn)
if err != nil {
e.setCheckerOrError(nil, err)
return
@@ -596,17 +685,43 @@ func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
return e, nil
}
// computeUnknownVars determines a set of missing variables based on the input activation and the
// environment's configured declaration set.
func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern {
var unknownPatterns []*interpreter.AttributePattern
for _, v := range e.variables {
varName := v.Name()
if _, found := vars.ResolveName(varName); found {
continue
}
unknownPatterns = append(unknownPatterns, interpreter.NewAttributePattern(varName))
}
return unknownPatterns
}
// Error type which references an expression id, a location within source, and a message.
type Error = common.Error
// Issues defines methods for inspecting the error details of parse and check calls.
//
// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
type Issues struct {
errs *common.Errors
info *exprpb.SourceInfo
}
// NewIssues returns an Issues struct from a common.Errors object.
func NewIssues(errs *common.Errors) *Issues {
return NewIssuesWithSourceInfo(errs, nil)
}
// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metadata
// which can be used with the `ReportErrorAtID` method for additional error reports, with location
// context inferred from an expression id.
func NewIssuesWithSourceInfo(errs *common.Errors, info *exprpb.SourceInfo) *Issues {
return &Issues{
errs: errs,
info: info,
}
}
@@ -622,9 +737,9 @@ func (i *Issues) Err() error {
}
// Errors returns the collection of errors encountered in more granular detail.
func (i *Issues) Errors() []common.Error {
func (i *Issues) Errors() []*Error {
if i == nil {
return []common.Error{}
return []*Error{}
}
return i.errs.GetErrors()
}
@@ -648,6 +763,37 @@ func (i *Issues) String() string {
return i.errs.ToDisplayString()
}
// ReportErrorAtID reports an error message with an optional set of formatting arguments.
//
// The source metadata for the expression at `id`, if present, is attached to the error report.
// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo.
func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) {
i.errs.ReportErrorAtID(id, locationByID(id, i.info), message, args...)
}
// locationByID returns a common.Location given an expression id.
//
// TODO: move this functionality into the native SourceInfo and an overhaul of the common.Source
// as this implementation relies on the abstractions present in the protobuf SourceInfo object,
// and is replicated in the checker.
func locationByID(id int64, sourceInfo *exprpb.SourceInfo) common.Location {
positions := sourceInfo.GetPositions()
var line = 1
if offset, found := positions[id]; found {
col := int(offset)
for _, lineOffset := range sourceInfo.GetLineOffsets() {
if lineOffset < offset {
line++
col = int(offset - lineOffset)
} else {
break
}
}
return common.NewLocation(line, col)
}
return common.NoLocation
}
// getStdEnv lazy initializes the CEL standard environment.
func getStdEnv() (*Env, error) {
stdEnvInit.Do(func() {
@@ -656,6 +802,90 @@ func getStdEnv() (*Env, error) {
return stdEnv, stdEnvErr
}
// interopCELTypeProvider layers support for the types.Provider interface on top of a ref.TypeProvider.
type interopCELTypeProvider struct {
ref.TypeProvider
}
// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists.
//
// This method proxies to the underlying ref.TypeProvider's FindType method and converts the protobuf
// type into a native type representation. If the conversion fails, the type is listed as not found.
func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
if et, found := p.FindType(typeName); found {
t, err := types.ExprTypeToType(et)
if err != nil {
return nil, false
}
return t, true
}
return nil, false
}
// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
// name, if one exists.
//
// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts the
// protobuf type into a native type representation. If the conversion fails, the type is listed as not found.
func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
if ft, found := p.FindFieldType(structType, fieldName); found {
t, err := types.ExprTypeToType(ft.Type)
if err != nil {
return nil, false
}
return &types.FieldType{
Type: t,
IsSet: ft.IsSet,
GetFrom: ft.GetFrom,
}, true
}
return nil, false
}
// interopLegacyTypeProvider layers support for the ref.TypeProvider interface on top of a types.Provider.
type interopLegacyTypeProvider struct {
types.Provider
}
// FindType returns the protobuf Type representation for the input type name if one exists.
//
// This method proxies to the underlying types.Provider FindStructType method and converts the types.Type
// value to a protobuf Type representation.
//
// Failure to convert the type will result in the type not being found.
func (p *interopLegacyTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
if t, found := p.FindStructType(typeName); found {
et, err := types.TypeToExprType(t)
if err != nil {
return nil, false
}
return et, true
}
return nil, false
}
// FindFieldType returns the protobuf-based FieldType representation for the input type name and field,
// if one exists.
//
// This call proxies to the types.Provider FindStructFieldType method and converts the types.FieldType
// value to a protobuf-based ref.FieldType representation if found.
//
// Failure to convert the FieldType will result in the field not being found.
func (p *interopLegacyTypeProvider) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
if cft, found := p.FindStructFieldType(structType, fieldName); found {
et, err := types.TypeToExprType(cft.Type)
if err != nil {
return nil, false
}
return &ref.FieldType{
Type: et,
IsSet: cft.IsSet,
GetFrom: cft.GetFrom,
}, true
}
return nil, false
}
var (
stdEnvInit sync.Once
stdEnv *Env

View File

@@ -22,6 +22,7 @@ import (
"google.golang.org/protobuf/proto"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
@@ -33,7 +34,8 @@ import (
// CheckedExprToAst converts a checked expression proto message to an Ast.
func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
return CheckedExprToAstWithSource(checkedExpr, nil)
checked, _ := CheckedExprToAstWithSource(checkedExpr, nil)
return checked
}
// CheckedExprToAstWithSource converts a checked expression proto message to an Ast,
@@ -44,29 +46,18 @@ func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
// through future calls.
//
// Prefer CheckedExprToAst if loading expressions from storage.
func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) *Ast {
refMap := checkedExpr.GetReferenceMap()
if refMap == nil {
refMap = map[int64]*exprpb.Reference{}
}
typeMap := checkedExpr.GetTypeMap()
if typeMap == nil {
typeMap = map[int64]*exprpb.Type{}
}
si := checkedExpr.GetSourceInfo()
if si == nil {
si = &exprpb.SourceInfo{}
}
if src == nil {
src = common.NewInfoSource(si)
func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) {
checkedAST, err := ast.CheckedExprToCheckedAST(checkedExpr)
if err != nil {
return nil, err
}
return &Ast{
expr: checkedExpr.GetExpr(),
info: si,
expr: checkedAST.Expr,
info: checkedAST.SourceInfo,
source: src,
refMap: refMap,
typeMap: typeMap,
}
refMap: checkedAST.ReferenceMap,
typeMap: checkedAST.TypeMap,
}, nil
}
// AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value.
@@ -76,12 +67,13 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
if !a.IsChecked() {
return nil, fmt.Errorf("cannot convert unchecked ast")
}
return &exprpb.CheckedExpr{
Expr: a.Expr(),
SourceInfo: a.SourceInfo(),
cAst := &ast.CheckedAST{
Expr: a.expr,
SourceInfo: a.info,
ReferenceMap: a.refMap,
TypeMap: a.typeMap,
}, nil
}
return ast.CheckedASTToCheckedExpr(cAst)
}
// ParsedExprToAst converts a parsed expression proto message to an Ast.
@@ -202,7 +194,7 @@ func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
}
var (
typeNameToTypeValue = map[string]*types.TypeValue{
typeNameToTypeValue = map[string]ref.Val{
"bool": types.BoolType,
"bytes": types.BytesType,
"double": types.DoubleType,
@@ -219,7 +211,7 @@ var (
)
// ValueToRefValue converts between exprpb.Value and ref.Val.
func ValueToRefValue(adapter ref.TypeAdapter, v *exprpb.Value) (ref.Val, error) {
func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
switch v.Kind.(type) {
case *exprpb.Value_NullValue:
return types.NullValue, nil

View File

@@ -15,19 +15,18 @@
package cel
import (
"math"
"strconv"
"strings"
"time"
"github.com/google/cel-go/checker"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/stdlib"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
"github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@@ -35,6 +34,7 @@ import (
const (
optMapMacro = "optMap"
optFlatMapMacro = "optFlatMap"
hasValueFunc = "hasValue"
optionalNoneFunc = "optional.none"
optionalOfFunc = "optional.of"
@@ -106,44 +106,213 @@ func (stdLibrary) LibraryName() string {
return "cel.lib.std"
}
// EnvOptions returns options for the standard CEL function declarations and macros.
// CompileOptions returns options for the standard CEL function declarations and macros.
func (stdLibrary) CompileOptions() []EnvOption {
return []EnvOption{
Declarations(checker.StandardDeclarations()...),
func(e *Env) (*Env, error) {
var err error
for _, fn := range stdlib.Functions() {
existing, found := e.functions[fn.Name()]
if found {
fn, err = existing.Merge(fn)
if err != nil {
return nil, err
}
}
e.functions[fn.Name()] = fn
}
return e, nil
},
func(e *Env) (*Env, error) {
e.variables = append(e.variables, stdlib.Types()...)
return e, nil
},
Macros(StandardMacros...),
}
}
// ProgramOptions returns function implementations for the standard CEL functions.
func (stdLibrary) ProgramOptions() []ProgramOption {
return []ProgramOption{
Functions(functions.StandardOverloads()...),
return []ProgramOption{}
}
// OptionalTypes enable support for optional syntax and types in CEL.
//
// The optional value type makes it possible to express whether variables have
// been provided, whether a result has been computed, and in the future whether
// an object field path, map key value, or list index has a value.
//
// # Syntax Changes
//
// OptionalTypes are unlike other CEL extensions because they modify the CEL
// syntax itself, notably through the use of a `?` preceding a field name or
// index value.
//
// ## Field Selection
//
// The optional syntax in field selection is denoted as `obj.?field`. In other
// words, if a field is set, return `optional.of(obj.field)`, else
// `optional.none()`. The optional field selection is viral in the sense that
// after the first optional selection all subsequent selections or indices
// are treated as optional, i.e. the following expressions are equivalent:
//
// obj.?field.subfield
// obj.?field.?subfield
//
// ## Indexing
//
// Similar to field selection, the optional syntax can be used in index
// expressions on maps and lists:
//
// list[?0]
// map[?key]
//
// ## Optional Field Setting
//
// When creating map or message literals, if a field may be optionally set
// based on its presence, then placing a `?` before the field name or key
// will ensure the type on the right-hand side must be optional(T) where T
// is the type of the field or key-value.
//
// The following returns a map with the key expression set only if the
// subfield is present, otherwise an empty map is created:
//
// {?key: obj.?field.subfield}
//
// ## Optional Element Setting
//
// When creating list literals, an element in the list may be optionally added
// when the element expression is preceded by a `?`:
//
// [a, ?b, ?c] // return a list with either [a], [a, b], [a, b, c], or [a, c]
//
// # Optional.Of
//
// Create an optional(T) value of a given value with type T.
//
// optional.of(10)
//
// # Optional.OfNonZeroValue
//
// Create an optional(T) value of a given value with type T if it is not a
// zero-value. A zero-value is the default empty value for any given CEL type,
// including empty protobuf message types. If the value is empty, the result
// of this call will be optional.none().
//
// optional.ofNonZeroValue([1, 2, 3]) // optional(list(int))
// optional.ofNonZeroValue([]) // optional.none()
// optional.ofNonZeroValue(0) // optional.none()
// optional.ofNonZeroValue("") // optional.none()
//
// # Optional.None
//
// Create an empty optional value.
//
// # HasValue
//
// Determine whether the optional contains a value.
//
// optional.of(b'hello').hasValue() // true
// optional.ofNonZeroValue({}).hasValue() // false
//
// # Value
//
// Get the value contained by the optional. If the optional does not have a
// value, the result will be a CEL error.
//
// optional.of(b'hello').value() // b'hello'
// optional.ofNonZeroValue({}).value() // error
//
// # Or
//
// If the value on the left-hand side is optional.none(), the optional value
// on the right hand side is returned. If the value on the left-hand set is
// valued, then it is returned. This operation is short-circuiting and will
// only evaluate as many links in the `or` chain as are needed to return a
// non-empty optional value.
//
// obj.?field.or(m[?key])
// l[?index].or(obj.?field.subfield).or(obj.?other)
//
// # OrValue
//
// Either return the value contained within the optional on the left-hand side
// or return the alternative value on the right hand side.
//
// m[?key].orValue("none")
//
// # OptMap
//
// Apply a transformation to the optional's underlying value if it is not empty
// and return an optional typed result based on the transformation. The
// transformation expression type must return a type T which is wrapped into
// an optional.
//
// msg.?elements.optMap(e, e.size()).orValue(0)
//
// # OptFlatMap
//
// Introduced in version: 1
//
// Apply a transformation to the optional's underlying value if it is not empty
// and return the result. The transform expression must return an optional(T)
// rather than type T. This can be useful when dealing with zero values and
// conditionally generating an empty or non-empty result in ways which cannot
// be expressed with `optMap`.
//
// msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
lib := &optionalLib{version: math.MaxUint32}
for _, opt := range opts {
lib = opt(lib)
}
return Lib(lib)
}
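
A short sketch of enabling OptionalTypes and using the optional indexing plus orValue() behavior documented above; the map variable and key are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.OptionalTypes(),
		cel.Variable("m", cel.MapType(cel.StringType, cel.StringType)),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Optional indexing plus orValue(): yields "fallback" when the key is absent.
	ast, iss := env.Compile(`m[?"region"].orValue("fallback")`)
	if iss.Err() != nil {
		log.Fatal(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		log.Fatal(err)
	}
	out, _, err := prg.Eval(map[string]any{"m": map[string]string{}})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // fallback
}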
type optionalLib struct {
version uint32
}
// OptionalTypesOption is a functional interface for configuring the optional types library.
type OptionalTypesOption func(*optionalLib) *optionalLib
// OptionalTypesVersion configures the version of the optional type library.
//
// The version limits which functions are available. Only functions introduced
// at or below the given version are included in the library. If this option
// is not set, all functions are available.
//
// See the library documentation to determine which version a function was introduced.
// If the documentation does not state which version a function was introduced, it can
// be assumed to be introduced at version 0, when the library was first created.
func OptionalTypesVersion(version uint32) OptionalTypesOption {
return func(lib *optionalLib) *optionalLib {
lib.version = version
return lib
}
}
type optionalLibrary struct{}
// LibraryName implements the SingletonLibrary interface method.
func (optionalLibrary) LibraryName() string {
func (lib *optionalLib) LibraryName() string {
return "cel.lib.optional"
}
// CompileOptions implements the Library interface method.
func (optionalLibrary) CompileOptions() []EnvOption {
func (lib *optionalLib) CompileOptions() []EnvOption {
paramTypeK := TypeParamType("K")
paramTypeV := TypeParamType("V")
optionalTypeV := OptionalType(paramTypeV)
listTypeV := ListType(paramTypeV)
mapTypeKV := MapType(paramTypeK, paramTypeV)
return []EnvOption{
opts := []EnvOption{
// Enable the optional syntax in the parser.
enableOptionalSyntax(),
// Introduce the optional type.
Types(types.OptionalType),
// Configure the optMap macro.
// Configure the optMap and optFlatMap macros.
Macros(NewReceiverMacro(optMapMacro, 2, optMap)),
// Global and member functions for working with optional values.
@@ -202,21 +371,29 @@ func (optionalLibrary) CompileOptions() []EnvOption {
// Index overloads to accommodate using an optional value as the operand.
Function(operators.Index,
Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
Overload("optional_map_index_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
}
if lib.version >= 1 {
opts = append(opts, Macros(NewReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
}
return opts
}
// ProgramOptions implements the Library interface method.
func (lib *optionalLib) ProgramOptions() []ProgramOption {
return []ProgramOption{
CustomDecorator(decorateOptionalOr),
}
}
func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
varIdent := args[0]
varName := ""
switch varIdent.GetExprKind().(type) {
case *exprpb.Expr_IdentExpr:
varName = varIdent.GetIdentExpr().GetName()
default:
return nil, &common.Error{
Message: "optMap() variable name must be a simple identifier",
Location: meh.OffsetLocation(varIdent.GetId()),
}
return nil, meh.NewError(varIdent.GetId(), "optMap() variable name must be a simple identifier")
}
mapExpr := args[1]
return meh.GlobalCall(
@@ -237,11 +414,30 @@ func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exp
), nil
}
// ProgramOptions implements the Library interface method.
func (optionalLibrary) ProgramOptions() []ProgramOption {
return []ProgramOption{
CustomDecorator(decorateOptionalOr),
func optFlatMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
varIdent := args[0]
varName := ""
switch varIdent.GetExprKind().(type) {
case *exprpb.Expr_IdentExpr:
varName = varIdent.GetIdentExpr().GetName()
default:
return nil, meh.NewError(varIdent.GetId(), "optFlatMap() variable name must be a simple identifier")
}
mapExpr := args[1]
return meh.GlobalCall(
operators.Conditional,
meh.ReceiverCall(hasValueFunc, target),
meh.Fold(
unusedIterVar,
meh.NewList(),
varName,
meh.ReceiverCall(valueFunc, target),
meh.LiteralBool(false),
meh.Ident(varName),
mapExpr,
),
meh.GlobalCall(optionalNoneFunc),
), nil
}
func enableOptionalSyntax() EnvOption {
@@ -358,28 +554,16 @@ var (
timeOverloadDeclarations = []EnvOption{
Function(overloads.TimeGetHours,
MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType,
UnaryBinding(func(dur ref.Val) ref.Val {
d := dur.(types.Duration)
return types.Int(d.Hours())
}))),
UnaryBinding(types.DurationGetHours))),
Function(overloads.TimeGetMinutes,
MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType,
UnaryBinding(func(dur ref.Val) ref.Val {
d := dur.(types.Duration)
return types.Int(d.Minutes())
}))),
UnaryBinding(types.DurationGetMinutes))),
Function(overloads.TimeGetSeconds,
MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType,
UnaryBinding(func(dur ref.Val) ref.Val {
d := dur.(types.Duration)
return types.Int(d.Seconds())
}))),
UnaryBinding(types.DurationGetSeconds))),
Function(overloads.TimeGetMilliseconds,
MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType,
UnaryBinding(func(dur ref.Val) ref.Val {
d := dur.(types.Duration)
return types.Int(d.Milliseconds())
}))),
UnaryBinding(types.DurationGetMilliseconds))),
Function(overloads.TimeGetFullYear,
MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType,
UnaryBinding(func(ts ref.Val) ref.Val {

View File

@@ -15,7 +15,6 @@
package cel
import (
"github.com/google/cel-go/common"
"github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@@ -63,21 +62,21 @@ func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
}
// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field)
func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeHas(meh, target, args)
}
// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
// elements in the range match the predicate expressions:
// <iterRange>.exists(<iterVar>, <predicate>)
func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeExists(meh, target, args)
}
// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
// one of the elements in the range match the predicate expressions:
// <iterRange>.exists_one(<iterVar>, <predicate>)
func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeExistsOne(meh, target, args)
}
@@ -91,14 +90,14 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex
//
// In the second form only iterVar values which return true when provided to the predicate expression
// are transformed.
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeMap(meh, target, args)
}
// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
// only elements which match the provided predicate expression:
// <iterRange>.filter(<iterVar>, <predicate>)
func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
return parser.MakeFilter(meh, target, args)
}

View File

@@ -23,12 +23,13 @@ import (
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/dynamicpb"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/checker"
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/functions"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/pb"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
"github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@@ -41,13 +42,6 @@ import (
const (
_ = iota
// Disallow heterogeneous aggregate (list, map) literals.
// Note, it is still possible to have heterogeneous aggregates when
// provided as variables to the expression, as well as via conversion
// of well-known dynamic types, or with unchecked expressions.
// Affects checking. Provides a subset of standard behavior.
featureDisableDynamicAggregateLiterals
// Enable the tracking of function call expressions replaced by macros.
featureEnableMacroCallTracking
@@ -63,9 +57,10 @@ const (
// is not already in UTC.
featureDefaultUTCTimeZone
// Enable the use of optional types in the syntax, type-system, type-checking,
// and runtime.
featureOptionalTypes
// Enable the serialization of logical operator ASTs as variadic calls, thus
// compressing the logic graph to a single call when multiple like-operator
// expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d])
featureVariadicLogicalASTs
)
// EnvOption is a functional interface for configuring the environment.
@@ -82,23 +77,26 @@ func ClearMacros() EnvOption {
}
}
// CustomTypeAdapter swaps the default ref.TypeAdapter implementation with a custom one.
// CustomTypeAdapter swaps the default types.Adapter implementation with a custom one.
//
// Note: This option must be specified before the Types and TypeDescs options when used together.
func CustomTypeAdapter(adapter ref.TypeAdapter) EnvOption {
func CustomTypeAdapter(adapter types.Adapter) EnvOption {
return func(e *Env) (*Env, error) {
e.adapter = adapter
return e, nil
}
}
// CustomTypeProvider swaps the default ref.TypeProvider implementation with a custom one.
// CustomTypeProvider replaces the types.Provider implementation with a custom one.
//
// The `provider` variable type may either be types.Provider or ref.TypeProvider (deprecated)
//
// Note: This option must be specified before the Types and TypeDescs options when used together.
func CustomTypeProvider(provider ref.TypeProvider) EnvOption {
func CustomTypeProvider(provider any) EnvOption {
return func(e *Env) (*Env, error) {
e.provider = provider
return e, nil
var err error
e.provider, err = maybeInteropProvider(provider)
return e, err
}
}
@@ -108,8 +106,28 @@ func CustomTypeProvider(provider ref.TypeProvider) EnvOption {
// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
// purely custom set of declarations use NewCustomEnv.
func Declarations(decls ...*exprpb.Decl) EnvOption {
declOpts := []EnvOption{}
var err error
var opt EnvOption
// Convert the declarations to `EnvOption` values ahead of time.
// Surface any errors in conversion when the options are applied.
for _, d := range decls {
opt, err = ExprDeclToDeclaration(d)
if err != nil {
break
}
declOpts = append(declOpts, opt)
}
return func(e *Env) (*Env, error) {
e.declarations = append(e.declarations, decls...)
if err != nil {
return nil, err
}
for _, o := range declOpts {
e, err = o(e)
if err != nil {
return nil, err
}
}
return e, nil
}
}
@@ -126,14 +144,25 @@ func EagerlyValidateDeclarations(enabled bool) EnvOption {
return features(featureEagerlyValidateDeclarations, enabled)
}
// HomogeneousAggregateLiterals option ensures that list and map literal entry types must agree
// during type-checking.
// HomogeneousAggregateLiterals disables mixed type list and map literal values.
//
// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
// expression, as well as via conversion of well-known dynamic types, or with unchecked
// expressions.
func HomogeneousAggregateLiterals() EnvOption {
return features(featureDisableDynamicAggregateLiterals, true)
return ASTValidators(ValidateHomogeneousAggregateLiterals())
}
// variadicLogicalOperatorASTs flattens like-operator chained logical expressions into a single
// variadic call with N terms. This behavior is useful when serializing to a protocol buffer as
// it will reduce the number of recursive calls needed to deserialize the AST later.
//
// For example, given the following expression the call graph will be rendered accordingly:
//
// expression: a && b && c && (d || e)
// ast: call(_&&_, [a, b, c, call(_||_, [d, e])])
func variadicLogicalOperatorASTs() EnvOption {
return features(featureVariadicLogicalASTs, true)
}
// Macros option extends the macro set configured in the environment.
@@ -226,7 +255,12 @@ func Abbrevs(qualifiedNames ...string) EnvOption {
// Note: This option must be specified after the CustomTypeProvider option when used together.
func Types(addTypes ...any) EnvOption {
return func(e *Env) (*Env, error) {
reg, isReg := e.provider.(ref.TypeRegistry)
var reg ref.TypeRegistry
var isReg bool
reg, isReg = e.provider.(*types.Registry)
if !isReg {
reg, isReg = e.provider.(ref.TypeRegistry)
}
if !isReg {
return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
}
@@ -436,6 +470,24 @@ func InterruptCheckFrequency(checkFrequency uint) ProgramOption {
}
}
// CostEstimatorOptions configure type-check time options for estimating expression cost.
func CostEstimatorOptions(costOpts ...checker.CostOption) EnvOption {
return func(e *Env) (*Env, error) {
e.costOptions = append(e.costOptions, costOpts...)
return e, nil
}
}
// CostTrackerOptions configures a set of options for cost-tracking.
//
// Note, CostTrackerOptions is a no-op unless CostTracking is also enabled.
func CostTrackerOptions(costOpts ...interpreter.CostTrackerOption) ProgramOption {
return func(p *prog) (*prog, error) {
p.costOptions = append(p.costOptions, costOpts...)
return p, nil
}
}
// CostTracking enables cost tracking and registers a ActualCostEstimator that can optionally provide a runtime cost estimate for any function calls.
func CostTracking(costEstimator interpreter.ActualCostEstimator) ProgramOption {
return func(p *prog) (*prog, error) {
@@ -457,25 +509,21 @@ func CostLimit(costLimit uint64) ProgramOption {
}
}
func fieldToCELType(field protoreflect.FieldDescriptor) (*exprpb.Type, error) {
func fieldToCELType(field protoreflect.FieldDescriptor) (*Type, error) {
if field.Kind() == protoreflect.MessageKind || field.Kind() == protoreflect.GroupKind {
msgName := (string)(field.Message().FullName())
wellKnownType, found := pb.CheckedWellKnowns[msgName]
if found {
return wellKnownType, nil
}
return decls.NewObjectType(msgName), nil
return ObjectType(msgName), nil
}
if primitiveType, found := pb.CheckedPrimitives[field.Kind()]; found {
if primitiveType, found := types.ProtoCELPrimitives[field.Kind()]; found {
return primitiveType, nil
}
if field.Kind() == protoreflect.EnumKind {
return decls.Int, nil
return IntType, nil
}
return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String())
}
func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) {
func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) {
name := string(field.Name())
if field.IsMap() {
mapKey := field.MapKey()
@@ -488,20 +536,20 @@ func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) {
if err != nil {
return nil, err
}
return decls.NewVar(name, decls.NewMapType(keyType, valueType)), nil
return Variable(name, MapType(keyType, valueType)), nil
}
if field.IsList() {
elemType, err := fieldToCELType(field)
if err != nil {
return nil, err
}
return decls.NewVar(name, decls.NewListType(elemType)), nil
return Variable(name, ListType(elemType)), nil
}
celType, err := fieldToCELType(field)
if err != nil {
return nil, err
}
return decls.NewVar(name, celType), nil
return Variable(name, celType), nil
}
// DeclareContextProto returns an option to extend CEL environment with declarations from the given context proto.
@@ -509,25 +557,53 @@ func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) {
// https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment
func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption {
return func(e *Env) (*Env, error) {
var decls []*exprpb.Decl
fields := descriptor.Fields()
for i := 0; i < fields.Len(); i++ {
field := fields.Get(i)
decl, err := fieldToDecl(field)
variable, err := fieldToVariable(field)
if err != nil {
return nil, err
}
e, err = variable(e)
if err != nil {
return nil, err
}
decls = append(decls, decl)
}
var err error
e, err = Declarations(decls...)(e)
if err != nil {
return nil, err
}
return Types(dynamicpb.NewMessage(descriptor))(e)
}
}
// ContextProtoVars uses the fields of the input proto.Message as top-level variables within an Activation.
//
// Consider using with `DeclareContextProto` to simplify variable type declarations and publishing when using
// protocol buffers.
func ContextProtoVars(ctx proto.Message) (interpreter.Activation, error) {
if ctx == nil || !ctx.ProtoReflect().IsValid() {
return interpreter.EmptyActivation(), nil
}
reg, err := types.NewRegistry(ctx)
if err != nil {
return nil, err
}
pbRef := ctx.ProtoReflect()
typeName := string(pbRef.Descriptor().FullName())
fields := pbRef.Descriptor().Fields()
vars := make(map[string]any, fields.Len())
for i := 0; i < fields.Len(); i++ {
field := fields.Get(i)
sft, found := reg.FindStructFieldType(typeName, field.TextName())
if !found {
return nil, fmt.Errorf("no such field: %s", field.TextName())
}
fieldVal, err := sft.GetFrom(ctx)
if err != nil {
return nil, err
}
vars[field.TextName()] = fieldVal
}
return interpreter.NewActivation(vars)
}
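A sketch pairing DeclareContextProto with ContextProtoVars; the examplepb package and its Request message (with a string field `name`) are hypothetical placeholders for a generated protobuf type:

// examplepb.Request is hypothetical; any generated proto message works the same way.
env, err := cel.NewEnv(
	cel.DeclareContextProto((&examplepb.Request{}).ProtoReflect().Descriptor()),
)
if err != nil {
	return err
}
ast, iss := env.Compile(`name.startsWith('desched')`)
if iss.Err() != nil {
	return iss.Err()
}
prg, err := env.Program(ast)
if err != nil {
	return err
}
// The proto fields become top-level variables in the resulting Activation.
vars, err := cel.ContextProtoVars(&examplepb.Request{Name: "descheduler"})
if err != nil {
	return err
}
out, _, err := prg.Eval(vars)
if err != nil {
	return err
}
fmt.Println(out) // true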
// EnableMacroCallTracking ensures that call expressions which are replaced by macros
// are tracked in the `SourceInfo` of parsed and checked expressions.
func EnableMacroCallTracking() EnvOption {
@@ -545,13 +621,6 @@ func DefaultUTCTimeZone(enabled bool) EnvOption {
return features(featureDefaultUTCTimeZone, enabled)
}
// OptionalTypes enable support for optional syntax and types in CEL. The optional value type makes
// it possible to express whether variables have been provided, whether a result has been computed,
// and in the future whether an object field path, map key value, or list index has a value.
func OptionalTypes() EnvOption {
return Lib(optionalLibrary{})
}
// features sets the given feature flags. See list of Feature constants above.
func features(flag int, enabled bool) EnvOption {
return func(e *Env) (*Env, error) {
@@ -577,3 +646,14 @@ func ParserExpressionSizeLimit(limit int) EnvOption {
return e, nil
}
}
func maybeInteropProvider(provider any) (types.Provider, error) {
switch p := provider.(type) {
case types.Provider:
return p, nil
case ref.TypeProvider:
return &interopCELTypeProvider{TypeProvider: p}, nil
default:
return nil, fmt.Errorf("unsupported type provider: %T", provider)
}
}

View File

@@ -19,11 +19,10 @@ import (
"fmt"
"sync"
celast "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Program is an evaluable view of an Ast.
@@ -62,6 +61,9 @@ func NoVars() interpreter.Activation {
// PartialVars returns a PartialActivation which contains variables and a set of AttributePattern
// values that indicate variables or parts of variables whose values are not yet known.
//
// This method relies on manually configured sets of missing attribute patterns. For a method which
// infers the missing variables from the input and the configured environment, use Env.PartialVars().
//
// The `vars` value may either be an interpreter.Activation or any valid input to the
// interpreter.NewActivation call.
func PartialVars(vars any,
@@ -104,7 +106,7 @@ func (ed *EvalDetails) State() interpreter.EvalState {
// ActualCost returns the tracked cost through the course of execution when `CostTracking` is enabled.
// Otherwise, returns nil when cost tracking was not enabled.
func (ed *EvalDetails) ActualCost() *uint64 {
if ed.costTracker == nil {
if ed == nil || ed.costTracker == nil {
return nil
}
cost := ed.costTracker.ActualCost()
@@ -128,10 +130,14 @@ type prog struct {
// Interpretable configured from an Ast and aggregate decorator set based on program options.
interpretable interpreter.Interpretable
callCostEstimator interpreter.ActualCostEstimator
costOptions []interpreter.CostTrackerOption
costLimit *uint64
}
func (p *prog) clone() *prog {
costOptsCopy := make([]interpreter.CostTrackerOption, len(p.costOptions))
copy(costOptsCopy, p.costOptions)
return &prog{
Env: p.Env,
evalOpts: p.evalOpts,
@@ -153,9 +159,10 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
// Ensure the default attribute factory is set after the adapter and provider are
// configured.
p := &prog{
Env: e,
decorators: []interpreter.InterpretableDecorator{},
dispatcher: disp,
Env: e,
decorators: []interpreter.InterpretableDecorator{},
dispatcher: disp,
costOptions: []interpreter.CostTrackerOption{},
}
// Configure the program via the ProgramOption values.
@@ -169,7 +176,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
// Add the function bindings created via Function() options.
for _, fn := range e.functions {
bindings, err := fn.bindings()
bindings, err := fn.Bindings()
if err != nil {
return nil, err
}
@@ -208,14 +215,11 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
}
// Enable compile-time checking of syntax/cardinality for string.format calls.
if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat {
var isValidType func(id int64, validTypes ...*types.TypeValue) (bool, error)
var isValidType func(id int64, validTypes ...ref.Type) (bool, error)
if ast.IsChecked() {
isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) {
t, err := ExprTypeToType(ast.typeMap[id])
if err != nil {
return false, err
}
if t.kind == DynKind {
isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
t := ast.typeMap[id]
if t.Kind() == DynKind {
return true, nil
}
for _, vt := range validTypes {
@@ -223,7 +227,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
if err != nil {
return false, err
}
if k == t.kind {
if t.Kind() == k {
return true, nil
}
}
@@ -231,7 +235,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
}
} else {
// if the AST isn't type-checked, short-circuit validation
isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) {
isValidType = func(id int64, validTypes ...ref.Type) (bool, error) {
return true, nil
}
}
@@ -243,6 +247,12 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
costTracker.Estimator = p.callCostEstimator
costTracker.Limit = p.costLimit
for _, costOpt := range p.costOptions {
err := costOpt(costTracker)
if err != nil {
return nil, err
}
}
// Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
// prevents the underlying memory from being shared between factory function calls causing
// undesired mutations.
@@ -284,10 +294,11 @@ func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecor
}
// When the AST has been checked it contains metadata that can be used to speed up program execution.
var checked *exprpb.CheckedExpr
checked, err := AstToCheckedExpr(ast)
if err != nil {
return nil, err
checked := &celast.CheckedAST{
Expr: ast.Expr(),
SourceInfo: ast.SourceInfo(),
TypeMap: ast.typeMap,
ReferenceMap: ast.refMap,
}
interpretable, err := p.interpreter.NewInterpretable(checked, decs...)
if err != nil {
@@ -371,7 +382,11 @@ type progGen struct {
// the test is successful.
func newProgGen(factory progFactory) (Program, error) {
// Test the factory to make sure that configuration errors are spotted at config time
_, err := factory(interpreter.NewEvalState(), &interpreter.CostTracker{})
tracker, err := interpreter.NewCostTracker(nil)
if err != nil {
return nil, err
}
_, err = factory(interpreter.NewEvalState(), tracker)
if err != nil {
return nil, err
}
@@ -384,7 +399,10 @@ func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
// results.
state := interpreter.NewEvalState()
costTracker := &interpreter.CostTracker{}
costTracker, err := interpreter.NewCostTracker(nil)
if err != nil {
return nil, nil, err
}
det := &EvalDetails{state: state, costTracker: costTracker}
// Generate a new instance of the interpretable using the factory configured during the call to
@@ -412,7 +430,10 @@ func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalD
// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
// results.
state := interpreter.NewEvalState()
costTracker := &interpreter.CostTracker{}
costTracker, err := interpreter.NewCostTracker(nil)
if err != nil {
return nil, nil, err
}
det := &EvalDetails{state: state, costTracker: costTracker}
// Generate a new instance of the interpretable using the factory configured during the call to
@@ -498,7 +519,7 @@ type evalActivation struct {
// The lazy binding will only be invoked once per evaluation.
//
// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
// the ref.TypeAdapter configured in the environment.
// the types.Adapter configured in the environment.
func (a *evalActivation) ResolveName(name string) (any, bool) {
v, found := a.vars[name]
if !found {

vendor/github.com/google/cel-go/cel/validator.go generated vendored Normal file
View File

@@ -0,0 +1,388 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cel
import (
"fmt"
"reflect"
"regexp"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/overloads"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
const (
homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous"
// HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure
// the set of function names which are exempt from homogeneous type checks. The expected type
// is a string list of function names.
//
// As an example, the `<string>.format([args])` call expects the input arguments list to be
// comprised of a variety of types which correspond to the types expected by the format control
// clauses; however, all other uses of a mixed element type list would be unexpected.
HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt"
)
// ASTValidators configures a set of ASTValidator instances into the target environment.
//
// Validators are applied in the order in which they are specified and are treated as singletons.
// The same ASTValidator with a given name will not be applied more than once.
func ASTValidators(validators ...ASTValidator) EnvOption {
return func(e *Env) (*Env, error) {
for _, v := range validators {
if !e.HasValidator(v.Name()) {
e.validators = append(e.validators, v)
}
}
return e, nil
}
}
// ASTValidator defines a singleton interface for validating a type-checked Ast against an environment.
//
// Note: the Issues argument is mutable in the sense that it is intended to collect errors which will be
// reported to the caller.
type ASTValidator interface {
// Name returns the name of the validator. Names must be unique.
Name() string
// Validate validates a given Ast within an Environment and collects a set of potential issues.
//
// The ValidatorConfig is generated from the set of ASTValidatorConfigurer instances prior to
// the invocation of the Validate call. The expectation is that the validator configuration
// is created in sequence and immutable once provided to the Validate call.
//
// See individual validators for more information on their configuration keys and configuration
// properties.
Validate(*Env, ValidatorConfig, *ast.CheckedAST, *Issues)
}
// ValidatorConfig provides an accessor method for querying validator configuration state.
type ValidatorConfig interface {
GetOrDefault(name string, value any) any
}
// MutableValidatorConfig provides mutation methods for querying and updating validator configuration
// settings.
type MutableValidatorConfig interface {
ValidatorConfig
Set(name string, value any) error
}
// ASTValidatorConfigurer indicates that this object, currently expected to be an ASTValidator,
// participates in validator configuration settings.
//
// This interface may be split from the expectation of being an ASTValidator instance in the future.
type ASTValidatorConfigurer interface {
Configure(MutableValidatorConfig) error
}
// validatorConfig implements the ValidatorConfig and MutableValidatorConfig interfaces.
type validatorConfig struct {
data map[string]any
}
// newValidatorConfig initializes the validator config with default values for core CEL validators.
func newValidatorConfig() *validatorConfig {
return &validatorConfig{
data: map[string]any{
HomogeneousAggregateLiteralExemptFunctions: []string{},
},
}
}
// GetOrDefault returns the configured value for the name, if present, else the input default value.
//
// Note, the type-agreement between the input default and configured value is not checked on read.
func (config *validatorConfig) GetOrDefault(name string, value any) any {
v, found := config.data[name]
if !found {
return value
}
return v
}
// Set configures a validator option with the given name and value.
//
// If the value had previously been set, the new value must have the same reflection type as the old one,
// or the call will error.
func (config *validatorConfig) Set(name string, value any) error {
v, found := config.data[name]
if found && reflect.TypeOf(v) != reflect.TypeOf(value) {
return fmt.Errorf("incompatible configuration type for %s, got %T, wanted %T", name, value, v)
}
config.data[name] = value
return nil
}
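A minimal package-internal sketch of the Set type-agreement rule, using the default exempt-functions key configured by newValidatorConfig:

cfg := newValidatorConfig()
// Same reflected type as the default ([]string): accepted.
_ = cfg.Set(HomogeneousAggregateLiteralExemptFunctions, []string{"format"})
// Different type (string vs []string): rejected with an "incompatible configuration type" error.
err := cfg.Set(HomogeneousAggregateLiteralExemptFunctions, "format")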
// ExtendedValidations collects a set of common AST validations which reduce the likelihood of runtime errors.
//
// - Validate duration and timestamp literals
// - Ensure regex strings are valid
// - Disable mixed type list and map literals
func ExtendedValidations() EnvOption {
return ASTValidators(
ValidateDurationLiterals(),
ValidateTimestampLiterals(),
ValidateRegexLiterals(),
ValidateHomogeneousAggregateLiterals(),
)
}
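A sketch of ExtendedValidations surfacing a malformed timestamp literal at compile time rather than at evaluation; the expression is an assumption:

env, err := cel.NewEnv(cel.ExtendedValidations())
if err != nil {
	return err
}
_, iss := env.Compile(`timestamp('not-a-timestamp') < timestamp('2024-01-02T00:00:00Z')`)
if iss.Err() != nil {
	fmt.Println(iss.Err()) // reports an invalid timestamp argument
}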
// ValidateDurationLiterals ensures that duration literal arguments are valid immediately after type-check.
func ValidateDurationLiterals() ASTValidator {
return newFormatValidator(overloads.TypeConvertDuration, 0, evalCall)
}
// ValidateTimestampLiterals ensures that timestamp literal arguments are valid immediately after type-check.
func ValidateTimestampLiterals() ASTValidator {
return newFormatValidator(overloads.TypeConvertTimestamp, 0, evalCall)
}
// ValidateRegexLiterals ensures that regex patterns are validated after type-check.
func ValidateRegexLiterals() ASTValidator {
return newFormatValidator(overloads.Matches, 0, compileRegex)
}
// ValidateHomogeneousAggregateLiterals checks that all list and map literals entries have the same types, i.e.
// no mixed list element types or mixed map key or map value types.
//
// Note: the string format call relies on a mixed element type list for ease of use, so this check skips all
// literals which occur within string format calls.
func ValidateHomogeneousAggregateLiterals() ASTValidator {
return homogeneousAggregateLiteralValidator{}
}
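A sketch of the homogeneous literal check flagging a mixed-type list that a default environment would otherwise accept as list(dyn); the expression is an assumption:

env, err := cel.NewEnv(cel.ASTValidators(cel.ValidateHomogeneousAggregateLiterals()))
if err != nil {
	return err
}
_, iss := env.Compile(`[1, 'two', 3.0]`)
// iss.Err() reports a type mismatch such as: expected type 'int' but found 'string'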
// ValidateComprehensionNestingLimit ensures that comprehension nesting does not exceed the specified limit.
//
// This validator can be useful for preventing arbitrarily nested comprehensions which can take high polynomial
// time to complete.
//
// Note, this limit does not apply to comprehensions with an empty iteration range, as these comprehensions have
// no actual looping cost. The cel.bind() utilizes the comprehension structure to perform local variable
// assignments and supplies an empty iteration range, so they won't count against the nesting limit either.
func ValidateComprehensionNestingLimit(limit int) ASTValidator {
return nestingLimitValidator{limit: limit}
}
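A sketch of capping comprehension nesting at one level; the nested all() expression is an assumption:

env, err := cel.NewEnv(cel.ASTValidators(cel.ValidateComprehensionNestingLimit(1)))
if err != nil {
	return err
}
_, iss := env.Compile(`[[1], [2]].all(row, row.all(cell, cell > 0))`)
// iss.Err() reports: comprehension exceeds nesting limit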
type argChecker func(env *Env, call, arg ast.NavigableExpr) error
func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator {
return formatValidator{
funcName: funcName,
check: check,
argNum: argNum,
}
}
type formatValidator struct {
funcName string
argNum int
check argChecker
}
// Name returns the unique name of this function format validator.
func (v formatValidator) Name() string {
return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName)
}
// Validate searches the AST for uses of a given function name with a constant argument and performs a check
// on whether the argument is a valid literal value.
func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
root := ast.NavigateCheckedAST(a)
funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName))
for _, call := range funcCalls {
callArgs := call.AsCall().Args()
if len(callArgs) <= v.argNum {
continue
}
litArg := callArgs[v.argNum]
if litArg.Kind() != ast.LiteralKind {
continue
}
if err := v.check(e, call, litArg); err != nil {
iss.ReportErrorAtID(litArg.ID(), "invalid %s argument", v.funcName)
}
}
}
func evalCall(env *Env, call, arg ast.NavigableExpr) error {
ast := ParsedExprToAst(&exprpb.ParsedExpr{Expr: call.ToExpr()})
prg, err := env.Program(ast)
if err != nil {
return err
}
_, _, err = prg.Eval(NoVars())
return err
}
func compileRegex(_ *Env, _, arg ast.NavigableExpr) error {
pattern := arg.AsLiteral().Value().(string)
_, err := regexp.Compile(pattern)
return err
}
type homogeneousAggregateLiteralValidator struct{}
// Name returns the unique name of the homogeneous type validator.
func (homogeneousAggregateLiteralValidator) Name() string {
return homogeneousValidatorName
}
// Configure implements the ASTValidatorConfigurer interface and currently sets the list of standard
// and exempt functions from homogeneous aggregate literal checks.
//
// TODO: Move this call into the string.format() ASTValidator once ported.
func (homogeneousAggregateLiteralValidator) Configure(c MutableValidatorConfig) error {
emptyList := []string{}
exemptFunctions := c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, emptyList).([]string)
exemptFunctions = append(exemptFunctions, "format")
return c.Set(HomogeneousAggregateLiteralExemptFunctions, exemptFunctions)
}
// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types.
//
// This validator makes an exception for list and map literals which occur at any level of nesting within
// string format calls.
func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
var exemptedFunctions []string
exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string)
root := ast.NavigateCheckedAST(a)
listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind))
for _, listExpr := range listExprs {
if inExemptFunction(listExpr, exemptedFunctions) {
continue
}
l := listExpr.AsList()
elements := l.Elements()
optIndices := l.OptionalIndices()
var elemType *Type
for i, e := range elements {
et := e.Type()
if isOptionalIndex(i, optIndices) {
et = et.Parameters()[0]
}
if elemType == nil {
elemType = et
continue
}
if !elemType.IsEquivalentType(et) {
v.typeMismatch(iss, e.ID(), elemType, et)
break
}
}
}
mapExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.MapKind))
for _, mapExpr := range mapExprs {
if inExemptFunction(mapExpr, exemptedFunctions) {
continue
}
m := mapExpr.AsMap()
entries := m.Entries()
var keyType, valType *Type
for _, e := range entries {
key, val := e.Key(), e.Value()
kt, vt := key.Type(), val.Type()
if e.IsOptional() {
vt = vt.Parameters()[0]
}
if keyType == nil && valType == nil {
keyType, valType = kt, vt
continue
}
if !keyType.IsEquivalentType(kt) {
v.typeMismatch(iss, key.ID(), keyType, kt)
}
if !valType.IsEquivalentType(vt) {
v.typeMismatch(iss, val.ID(), valType, vt)
}
}
}
}
func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool {
if parent, found := e.Parent(); found {
if parent.Kind() == ast.CallKind {
fnName := parent.AsCall().FunctionName()
for _, exempt := range exemptFunctions {
if exempt == fnName {
return true
}
}
}
if parent.Kind() == ast.ListKind || parent.Kind() == ast.MapKind {
return inExemptFunction(parent, exemptFunctions)
}
}
return false
}
func isOptionalIndex(i int, optIndices []int32) bool {
for _, optInd := range optIndices {
if i == int(optInd) {
return true
}
}
return false
}
func (homogeneousAggregateLiteralValidator) typeMismatch(iss *Issues, id int64, expected, actual *Type) {
iss.ReportErrorAtID(id, "expected type '%s' but found '%s'", FormatCELType(expected), FormatCELType(actual))
}
type nestingLimitValidator struct {
limit int
}
func (v nestingLimitValidator) Name() string {
return "cel.lib.std.validate.comprehension_nesting_limit"
}
func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) {
root := ast.NavigateCheckedAST(a)
comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind))
if len(comprehensions) <= v.limit {
return
}
for _, comp := range comprehensions {
count := 0
e := comp
hasParent := true
for hasParent {
// When the expression is not a comprehension, continue to the next ancestor.
if e.Kind() != ast.ComprehensionKind {
e, hasParent = e.Parent()
continue
}
// When the comprehension has an empty range, continue to the next ancestor
// as this comprehension does not have any associated cost.
iterRange := e.AsComprehension().IterRange()
if iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 {
e, hasParent = e.Parent()
continue
}
// Otherwise check the nesting limit.
count++
if count > v.limit {
iss.ReportErrorAtID(comp.ID(), "comprehension exceeds nesting limit")
break
}
e, hasParent = e.Parent()
}
}
}

View File

@@ -11,9 +11,11 @@ go_library(
"cost.go",
"env.go",
"errors.go",
"format.go",
"mapping.go",
"options.go",
"printer.go",
"scopes.go",
"standard.go",
"types.go",
],
@@ -22,10 +24,13 @@ go_library(
deps = [
"//checker/decls:go_default_library",
"//common:go_default_library",
"//common/ast:go_default_library",
"//common/containers:go_default_library",
"//common/debug:go_default_library",
"//common/decls:go_default_library",
"//common/operators:go_default_library",
"//common/overloads:go_default_library",
"//common/stdlib:go_default_library",
"//common/types:go_default_library",
"//common/types/pb:go_default_library",
"//common/types/ref:go_default_library",
@@ -44,6 +49,7 @@ go_test(
"checker_test.go",
"cost_test.go",
"env_test.go",
"format_test.go",
],
embed = [
":go_default_library",

View File

@@ -18,15 +18,13 @@ package checker
import (
"fmt"
"reflect"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types/ref"
"google.golang.org/protobuf/proto"
"github.com/google/cel-go/common/types"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
@@ -37,8 +35,8 @@ type checker struct {
mappings *mapping
freeTypeVarCounter int
sourceInfo *exprpb.SourceInfo
types map[int64]*exprpb.Type
references map[int64]*exprpb.Reference
types map[int64]*types.Type
references map[int64]*ast.ReferenceInfo
}
// Check performs type checking, giving a typed AST.
@@ -47,40 +45,38 @@ type checker struct {
// descriptions of protocol buffers, and a registry for errors.
// Returns a CheckedExpr proto, which might not be usable if
// there are errors in the error registry.
func Check(parsedExpr *exprpb.ParsedExpr,
source common.Source,
env *Env) (*exprpb.CheckedExpr, *common.Errors) {
func Check(parsedExpr *exprpb.ParsedExpr, source common.Source, env *Env) (*ast.CheckedAST, *common.Errors) {
errs := common.NewErrors(source)
c := checker{
env: env,
errors: &typeErrors{common.NewErrors(source)},
errors: &typeErrors{errs: errs},
mappings: newMapping(),
freeTypeVarCounter: 0,
sourceInfo: parsedExpr.GetSourceInfo(),
types: make(map[int64]*exprpb.Type),
references: make(map[int64]*exprpb.Reference),
types: make(map[int64]*types.Type),
references: make(map[int64]*ast.ReferenceInfo),
}
c.check(parsedExpr.GetExpr())
// Walk over the final type map substituting any type parameters either by their bound value or
// by DYN.
m := make(map[int64]*exprpb.Type)
for k, v := range c.types {
m[k] = substitute(c.mappings, v, true)
m := make(map[int64]*types.Type)
for id, t := range c.types {
m[id] = substitute(c.mappings, t, true)
}
return &exprpb.CheckedExpr{
return &ast.CheckedAST{
Expr: parsedExpr.GetExpr(),
SourceInfo: parsedExpr.GetSourceInfo(),
TypeMap: m,
ReferenceMap: c.references,
}, c.errors.Errors
}, errs
}
func (c *checker) check(e *exprpb.Expr) {
if e == nil {
return
}
switch e.GetExprKind().(type) {
case *exprpb.Expr_ConstExpr:
literal := e.GetConstExpr()
@@ -113,53 +109,51 @@ func (c *checker) check(e *exprpb.Expr) {
case *exprpb.Expr_ComprehensionExpr:
c.checkComprehension(e)
default:
c.errors.ReportError(
c.location(e), "Unrecognized ast type: %v", reflect.TypeOf(e))
c.errors.unexpectedASTType(e.GetId(), c.location(e), e)
}
}
func (c *checker) checkInt64Literal(e *exprpb.Expr) {
c.setType(e, decls.Int)
c.setType(e, types.IntType)
}
func (c *checker) checkUint64Literal(e *exprpb.Expr) {
c.setType(e, decls.Uint)
c.setType(e, types.UintType)
}
func (c *checker) checkStringLiteral(e *exprpb.Expr) {
c.setType(e, decls.String)
c.setType(e, types.StringType)
}
func (c *checker) checkBytesLiteral(e *exprpb.Expr) {
c.setType(e, decls.Bytes)
c.setType(e, types.BytesType)
}
func (c *checker) checkDoubleLiteral(e *exprpb.Expr) {
c.setType(e, decls.Double)
c.setType(e, types.DoubleType)
}
func (c *checker) checkBoolLiteral(e *exprpb.Expr) {
c.setType(e, decls.Bool)
c.setType(e, types.BoolType)
}
func (c *checker) checkNullLiteral(e *exprpb.Expr) {
c.setType(e, decls.Null)
c.setType(e, types.NullType)
}
func (c *checker) checkIdent(e *exprpb.Expr) {
identExpr := e.GetIdentExpr()
// Check to see if the identifier is declared.
if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil {
c.setType(e, ident.GetIdent().GetType())
c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue()))
c.setType(e, ident.Type())
c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
// Overwrite the identifier with its fully qualified name.
identExpr.Name = ident.GetName()
identExpr.Name = ident.Name()
return
}
c.setType(e, decls.Error)
c.errors.undeclaredReference(
c.location(e), c.env.container.Name(), identExpr.GetName())
c.setType(e, types.ErrorType)
c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), identExpr.GetName())
}
func (c *checker) checkSelect(e *exprpb.Expr) {
@@ -174,9 +168,9 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
// Rewrite the node to be a variable reference to the resolved fully-qualified
// variable name.
c.setType(e, ident.GetIdent().GetType())
c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue()))
identName := ident.GetName()
c.setType(e, ident.Type())
c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
identName := ident.Name()
e.ExprKind = &exprpb.Expr_IdentExpr{
IdentExpr: &exprpb.Expr_Ident{
Name: identName,
@@ -188,7 +182,7 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false)
if sel.TestOnly {
resultType = decls.Bool
resultType = types.BoolType
}
c.setType(e, substitute(c.mappings, resultType, false))
}
@@ -200,16 +194,17 @@ func (c *checker) checkOptSelect(e *exprpb.Expr) {
field := call.GetArgs()[1]
fieldName, isString := maybeUnwrapString(field)
if !isString {
c.errors.ReportError(c.location(field), "unsupported optional field selection: %v", field)
c.errors.notAnOptionalFieldSelection(field.GetId(), c.location(field), field)
return
}
// Perform type-checking using the field selection logic.
resultType := c.checkSelectField(e, operand, fieldName, true)
c.setType(e, substitute(c.mappings, resultType, false))
c.setReference(e, ast.NewFunctionReference("select_optional_field"))
}
func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *exprpb.Type {
func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *types.Type {
// Interpret as field selection, first traversing down the operand.
c.check(operand)
operandType := substitute(c.mappings, c.getType(operand), false)
@@ -218,38 +213,37 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option
targetType, isOpt := maybeUnwrapOptional(operandType)
// Assume error type by default as most types do not support field selection.
resultType := decls.Error
switch kindOf(targetType) {
case kindMap:
resultType := types.ErrorType
switch targetType.Kind() {
case types.MapKind:
// Maps yield their value type as the selection result type.
mapType := targetType.GetMapType()
resultType = mapType.GetValueType()
case kindObject:
resultType = targetType.Parameters()[1]
case types.StructKind:
// Objects yield their field type declaration as the selection result type, but only if
// the field is defined.
messageType := targetType
if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), field); found {
resultType = fieldType.Type
if fieldType, found := c.lookupFieldType(e.GetId(), messageType.TypeName(), field); found {
resultType = fieldType
}
case kindTypeParam:
case types.TypeParamKind:
// Set the operand type to DYN to prevent assignment to a potentially incorrect type
// at a later point in type-checking. The isAssignable call will update the type
// substitutions for the type param under the covers.
c.isAssignable(decls.Dyn, targetType)
c.isAssignable(types.DynType, targetType)
// Also, set the result type to DYN.
resultType = decls.Dyn
resultType = types.DynType
default:
// Dynamic / error values are treated as DYN type. Errors are handled this way as well
// in order to allow forward progress on the check.
if !isDynOrError(targetType) {
c.errors.typeDoesNotSupportFieldSelection(c.location(e), targetType)
c.errors.typeDoesNotSupportFieldSelection(e.GetId(), c.location(e), targetType)
}
resultType = decls.Dyn
resultType = types.DynType
}
// If the target type was optional coming in, then the result must be optional going out.
if isOpt || optional {
return decls.NewOptionalType(resultType)
return types.NewOptionalType(resultType)
}
return resultType
}
@@ -277,15 +271,14 @@ func (c *checker) checkCall(e *exprpb.Expr) {
// Check for the existence of the function.
fn := c.env.LookupFunction(fnName)
if fn == nil {
c.errors.undeclaredReference(
c.location(e), c.env.container.Name(), fnName)
c.setType(e, decls.Error)
c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
c.setType(e, types.ErrorType)
return
}
// Overwrite the function name with its fully qualified resolved name.
call.Function = fn.GetName()
call.Function = fn.Name()
// Check to see whether the overload resolves.
c.resolveOverloadOrError(c.location(e), e, fn, nil, args)
c.resolveOverloadOrError(e, fn, nil, args)
return
}
@@ -303,8 +296,8 @@ func (c *checker) checkCall(e *exprpb.Expr) {
// be an inaccurate representation of the desired evaluation behavior.
// Overwrite with fully-qualified resolved function name sans receiver target.
call.Target = nil
call.Function = fn.GetName()
c.resolveOverloadOrError(c.location(e), e, fn, nil, args)
call.Function = fn.Name()
c.resolveOverloadOrError(e, fn, nil, args)
return
}
}
@@ -314,22 +307,21 @@ func (c *checker) checkCall(e *exprpb.Expr) {
fn := c.env.LookupFunction(fnName)
// Function found, attempt overload resolution.
if fn != nil {
c.resolveOverloadOrError(c.location(e), e, fn, target, args)
c.resolveOverloadOrError(e, fn, target, args)
return
}
// Function name not declared, record error.
c.errors.undeclaredReference(c.location(e), c.env.container.Name(), fnName)
c.setType(e, types.ErrorType)
c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName)
}
func (c *checker) resolveOverloadOrError(
loc common.Location,
e *exprpb.Expr,
fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) {
e *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) {
// Attempt to resolve the overload.
resolution := c.resolveOverload(loc, fn, target, args)
resolution := c.resolveOverload(e, fn, target, args)
// No such overload, error noted in the resolveOverload call, type recorded here.
if resolution == nil {
c.setType(e, decls.Error)
c.setType(e, types.ErrorType)
return
}
// Overload found.
@@ -338,10 +330,9 @@ func (c *checker) resolveOverloadOrError(
}
func (c *checker) resolveOverload(
loc common.Location,
fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution {
call *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution {
var argTypes []*exprpb.Type
var argTypes []*types.Type
if target != nil {
argTypes = append(argTypes, c.getType(target))
}
@@ -349,55 +340,75 @@ func (c *checker) resolveOverload(
argTypes = append(argTypes, c.getType(arg))
}
var resultType *exprpb.Type
var checkedRef *exprpb.Reference
for _, overload := range fn.GetFunction().GetOverloads() {
var resultType *types.Type
var checkedRef *ast.ReferenceInfo
for _, overload := range fn.OverloadDecls() {
// Determine whether the overload is currently considered.
if c.env.isOverloadDisabled(overload.GetOverloadId()) {
if c.env.isOverloadDisabled(overload.ID()) {
continue
}
// Ensure the call style for the overload matches.
if (target == nil && overload.GetIsInstanceFunction()) ||
(target != nil && !overload.GetIsInstanceFunction()) {
if (target == nil && overload.IsMemberFunction()) ||
(target != nil && !overload.IsMemberFunction()) {
// not a compatible call style.
continue
}
overloadType := decls.NewFunctionType(overload.ResultType, overload.Params...)
if len(overload.GetTypeParams()) > 0 {
// Alternative type-checking behavior when the logical operators are compacted into
// variadic AST representations.
if fn.Name() == operators.LogicalAnd || fn.Name() == operators.LogicalOr {
checkedRef = ast.NewFunctionReference(overload.ID())
for i, argType := range argTypes {
if !c.isAssignable(argType, types.BoolType) {
c.errors.typeMismatch(
args[i].GetId(),
c.locationByID(args[i].GetId()),
types.BoolType,
argType)
resultType = types.ErrorType
}
}
if isError(resultType) {
return nil
}
return newResolution(checkedRef, types.BoolType)
}
overloadType := newFunctionType(overload.ResultType(), overload.ArgTypes()...)
typeParams := overload.TypeParams()
if len(typeParams) != 0 {
// Instantiate overload's type with fresh type variables.
substitutions := newMapping()
for _, typePar := range overload.GetTypeParams() {
substitutions.add(decls.NewTypeParamType(typePar), c.newTypeVar())
for _, typePar := range typeParams {
substitutions.add(types.NewTypeParamType(typePar), c.newTypeVar())
}
overloadType = substitute(substitutions, overloadType, false)
}
candidateArgTypes := overloadType.GetFunction().GetArgTypes()
candidateArgTypes := overloadType.Parameters()[1:]
if c.isAssignableList(argTypes, candidateArgTypes) {
if checkedRef == nil {
checkedRef = newFunctionReference(overload.GetOverloadId())
checkedRef = ast.NewFunctionReference(overload.ID())
} else {
checkedRef.OverloadId = append(checkedRef.GetOverloadId(), overload.GetOverloadId())
checkedRef.AddOverload(overload.ID())
}
// First matching overload, determines result type.
fnResultType := substitute(c.mappings, overloadType.GetFunction().GetResultType(), false)
fnResultType := substitute(c.mappings, overloadType.Parameters()[0], false)
if resultType == nil {
resultType = fnResultType
} else if !isDyn(resultType) && !proto.Equal(fnResultType, resultType) {
resultType = decls.Dyn
} else if !isDyn(resultType) && !fnResultType.IsExactType(resultType) {
resultType = types.DynType
}
}
}
if resultType == nil {
for i, arg := range argTypes {
argTypes[i] = substitute(c.mappings, arg, true)
for i, argType := range argTypes {
argTypes[i] = substitute(c.mappings, argType, true)
}
c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil)
resultType = decls.Error
c.errors.noMatchingOverload(call.GetId(), c.location(call), fn.Name(), argTypes, target != nil)
return nil
}
@@ -406,7 +417,7 @@ func (c *checker) resolveOverload(
func (c *checker) checkCreateList(e *exprpb.Expr) {
create := e.GetListExpr()
var elemsType *exprpb.Type
var elemsType *types.Type
optionalIndices := create.GetOptionalIndices()
optionals := make(map[int32]bool, len(optionalIndices))
for _, optInd := range optionalIndices {
@@ -419,16 +430,16 @@ func (c *checker) checkCreateList(e *exprpb.Expr) {
var isOptional bool
elemType, isOptional = maybeUnwrapOptional(elemType)
if !isOptional && !isDyn(elemType) {
c.errors.typeMismatch(c.location(e), decls.NewOptionalType(elemType), elemType)
c.errors.typeMismatch(e.GetId(), c.location(e), types.NewOptionalType(elemType), elemType)
}
}
elemsType = c.joinTypes(c.location(e), elemsType, elemType)
elemsType = c.joinTypes(e, elemsType, elemType)
}
if elemsType == nil {
// If the list is empty, assign free type var to elem type.
elemsType = c.newTypeVar()
}
c.setType(e, decls.NewListType(elemsType))
c.setType(e, types.NewListType(elemsType))
}
func (c *checker) checkCreateStruct(e *exprpb.Expr) {
@@ -442,12 +453,12 @@ func (c *checker) checkCreateStruct(e *exprpb.Expr) {
func (c *checker) checkCreateMap(e *exprpb.Expr) {
mapVal := e.GetStructExpr()
var mapKeyType *exprpb.Type
var mapValueType *exprpb.Type
var mapKeyType *types.Type
var mapValueType *types.Type
for _, ent := range mapVal.GetEntries() {
key := ent.GetMapKey()
c.check(key)
mapKeyType = c.joinTypes(c.location(key), mapKeyType, c.getType(key))
mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key))
val := ent.GetValue()
c.check(val)
@@ -456,50 +467,54 @@ func (c *checker) checkCreateMap(e *exprpb.Expr) {
var isOptional bool
valType, isOptional = maybeUnwrapOptional(valType)
if !isOptional && !isDyn(valType) {
c.errors.typeMismatch(c.location(val), decls.NewOptionalType(valType), valType)
c.errors.typeMismatch(val.GetId(), c.location(val), types.NewOptionalType(valType), valType)
}
}
mapValueType = c.joinTypes(c.location(val), mapValueType, valType)
mapValueType = c.joinTypes(val, mapValueType, valType)
}
if mapKeyType == nil {
// If the map is empty, assign free type variables to typeKey and value type.
mapKeyType = c.newTypeVar()
mapValueType = c.newTypeVar()
}
c.setType(e, decls.NewMapType(mapKeyType, mapValueType))
c.setType(e, types.NewMapType(mapKeyType, mapValueType))
}
func (c *checker) checkCreateMessage(e *exprpb.Expr) {
msgVal := e.GetStructExpr()
// Determine the type of the message.
messageType := decls.Error
decl := c.env.LookupIdent(msgVal.GetMessageName())
if decl == nil {
resultType := types.ErrorType
ident := c.env.LookupIdent(msgVal.GetMessageName())
if ident == nil {
c.errors.undeclaredReference(
c.location(e), c.env.container.Name(), msgVal.GetMessageName())
e.GetId(), c.location(e), c.env.container.Name(), msgVal.GetMessageName())
c.setType(e, types.ErrorType)
return
}
// Ensure the type name is fully qualified in the AST.
msgVal.MessageName = decl.GetName()
c.setReference(e, newIdentReference(decl.GetName(), nil))
ident := decl.GetIdent()
identKind := kindOf(ident.GetType())
if identKind != kindError {
if identKind != kindType {
c.errors.notAType(c.location(e), ident.GetType())
typeName := ident.Name()
msgVal.MessageName = typeName
c.setReference(e, ast.NewIdentReference(ident.Name(), nil))
identKind := ident.Type().Kind()
if identKind != types.ErrorKind {
if identKind != types.TypeKind {
c.errors.notAType(e.GetId(), c.location(e), ident.Type().DeclaredTypeName())
} else {
messageType = ident.GetType().GetType()
if kindOf(messageType) != kindObject {
c.errors.notAMessageType(c.location(e), messageType)
messageType = decls.Error
resultType = ident.Type().Parameters()[0]
// Backwards compatibility test between well-known types and message types
// In this context, the type is being instantiated by its protobuf name which
// is not ideal or recommended, but some users expect this to work.
if isWellKnownType(resultType) {
typeName = getWellKnownTypeName(resultType)
} else if resultType.Kind() == types.StructKind {
typeName = resultType.DeclaredTypeName()
} else {
c.errors.notAMessageType(e.GetId(), c.location(e), resultType.DeclaredTypeName())
resultType = types.ErrorType
}
}
}
if isObjectWellKnownType(messageType) {
c.setType(e, getObjectWellKnownType(messageType))
} else {
c.setType(e, messageType)
}
c.setType(e, resultType)
// Check the field initializers.
for _, ent := range msgVal.GetEntries() {
@@ -507,10 +522,10 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) {
value := ent.GetValue()
c.check(value)
fieldType := decls.Error
ft, found := c.lookupFieldType(c.locationByID(ent.GetId()), messageType.GetMessageType(), field)
fieldType := types.ErrorType
ft, found := c.lookupFieldType(ent.GetId(), typeName, field)
if found {
fieldType = ft.Type
fieldType = ft
}
valType := c.getType(value)
@@ -518,11 +533,11 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) {
var isOptional bool
valType, isOptional = maybeUnwrapOptional(valType)
if !isOptional && !isDyn(valType) {
c.errors.typeMismatch(c.location(value), decls.NewOptionalType(valType), valType)
c.errors.typeMismatch(value.GetId(), c.location(value), types.NewOptionalType(valType), valType)
}
}
if !c.isAssignable(fieldType, valType) {
c.errors.fieldTypeMismatch(c.locationByID(ent.Id), field, fieldType, valType)
c.errors.fieldTypeMismatch(ent.GetId(), c.locationByID(ent.GetId()), field, fieldType, valType)
}
}
}
@@ -533,36 +548,36 @@ func (c *checker) checkComprehension(e *exprpb.Expr) {
c.check(comp.GetAccuInit())
accuType := c.getType(comp.GetAccuInit())
rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false)
var varType *exprpb.Type
var varType *types.Type
switch kindOf(rangeType) {
case kindList:
varType = rangeType.GetListType().GetElemType()
case kindMap:
switch rangeType.Kind() {
case types.ListKind:
varType = rangeType.Parameters()[0]
case types.MapKind:
// Ranges over the keys.
varType = rangeType.GetMapType().GetKeyType()
case kindDyn, kindError, kindTypeParam:
varType = rangeType.Parameters()[0]
case types.DynKind, types.ErrorKind, types.TypeParamKind:
// Set the range type to DYN to prevent assignment to a potentially incorrect type
// at a later point in type-checking. The isAssignable call will update the type
// substitutions for the type param under the covers.
c.isAssignable(decls.Dyn, rangeType)
c.isAssignable(types.DynType, rangeType)
// Set the range iteration variable to type DYN as well.
varType = decls.Dyn
varType = types.DynType
default:
c.errors.notAComprehensionRange(c.location(comp.GetIterRange()), rangeType)
varType = decls.Error
c.errors.notAComprehensionRange(comp.GetIterRange().GetId(), c.location(comp.GetIterRange()), rangeType)
varType = types.ErrorType
}
// Create a scope for the comprehension since it has a local accumulation variable.
// This scope will contain the accumulation variable used to compute the result.
c.env = c.env.enterScope()
c.env.Add(decls.NewVar(comp.GetAccuVar(), accuType))
c.env.AddIdents(decls.NewVariable(comp.GetAccuVar(), accuType))
// Create a block scope for the loop.
c.env = c.env.enterScope()
c.env.Add(decls.NewVar(comp.GetIterVar(), varType))
c.env.AddIdents(decls.NewVariable(comp.GetIterVar(), varType))
// Check the variable references in the condition and step.
c.check(comp.GetLoopCondition())
c.assertType(comp.GetLoopCondition(), decls.Bool)
c.assertType(comp.GetLoopCondition(), types.BoolType)
c.check(comp.GetLoopStep())
c.assertType(comp.GetLoopStep(), accuType)
// Exit the loop's block scope before checking the result.
@@ -574,9 +589,7 @@ func (c *checker) checkComprehension(e *exprpb.Expr) {
}
// Checks compatibility of joined types, and returns the most general common type.
func (c *checker) joinTypes(loc common.Location,
previous *exprpb.Type,
current *exprpb.Type) *exprpb.Type {
func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *types.Type {
if previous == nil {
return current
}
@@ -584,23 +597,23 @@ func (c *checker) joinTypes(loc common.Location,
return mostGeneral(previous, current)
}
if c.dynAggregateLiteralElementTypesEnabled() {
return decls.Dyn
return types.DynType
}
c.errors.typeMismatch(loc, previous, current)
return decls.Error
c.errors.typeMismatch(e.GetId(), c.location(e), previous, current)
return types.ErrorType
}
func (c *checker) dynAggregateLiteralElementTypesEnabled() bool {
return c.env.aggLitElemType == dynElementType
}
func (c *checker) newTypeVar() *exprpb.Type {
func (c *checker) newTypeVar() *types.Type {
id := c.freeTypeVarCounter
c.freeTypeVarCounter++
return decls.NewTypeParamType(fmt.Sprintf("_var%d", id))
return types.NewTypeParamType(fmt.Sprintf("_var%d", id))
}
func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool {
func (c *checker) isAssignable(t1, t2 *types.Type) bool {
subs := isAssignable(c.mappings, t1, t2)
if subs != nil {
c.mappings = subs
@@ -610,7 +623,7 @@ func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool {
return false
}
func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
func (c *checker) isAssignableList(l1, l2 []*types.Type) bool {
subs := isAssignableList(c.mappings, l1, l2)
if subs != nil {
c.mappings = subs
@@ -620,57 +633,52 @@ func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
return false
}
func (c *checker) lookupFieldType(l common.Location, messageType string, fieldName string) (*ref.FieldType, bool) {
if _, found := c.env.provider.FindType(messageType); !found {
// This should not happen, anyway, report an error.
c.errors.unexpectedFailedResolution(l, messageType)
return nil, false
func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
switch e.GetExprKind().(type) {
case *exprpb.Expr_ConstExpr:
literal := e.GetConstExpr()
switch literal.GetConstantKind().(type) {
case *exprpb.Constant_StringValue:
return literal.GetStringValue(), true
}
}
if ft, found := c.env.provider.FindFieldType(messageType, fieldName); found {
return ft, found
}
c.errors.undefinedField(l, fieldName)
return nil, false
return "", false
}
func (c *checker) setType(e *exprpb.Expr, t *exprpb.Type) {
if old, found := c.types[e.GetId()]; found && !proto.Equal(old, t) {
c.errors.ReportError(c.location(e),
"(Incompatible) Type already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, t)
func (c *checker) setType(e *exprpb.Expr, t *types.Type) {
if old, found := c.types[e.GetId()]; found && !old.IsExactType(t) {
c.errors.incompatibleType(e.GetId(), c.location(e), e, old, t)
return
}
c.types[e.GetId()] = t
}
func (c *checker) getType(e *exprpb.Expr) *exprpb.Type {
func (c *checker) getType(e *exprpb.Expr) *types.Type {
return c.types[e.GetId()]
}
func (c *checker) setReference(e *exprpb.Expr, r *exprpb.Reference) {
if old, found := c.references[e.GetId()]; found && !proto.Equal(old, r) {
c.errors.ReportError(c.location(e),
"Reference already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, r)
func (c *checker) setReference(e *exprpb.Expr, r *ast.ReferenceInfo) {
if old, found := c.references[e.GetId()]; found && !old.Equals(r) {
c.errors.referenceRedefinition(e.GetId(), c.location(e), e, old, r)
return
}
c.references[e.GetId()] = r
}
func (c *checker) assertType(e *exprpb.Expr, t *exprpb.Type) {
func (c *checker) assertType(e *exprpb.Expr, t *types.Type) {
if !c.isAssignable(t, c.getType(e)) {
c.errors.typeMismatch(c.location(e), t, c.getType(e))
c.errors.typeMismatch(e.GetId(), c.location(e), t, c.getType(e))
}
}
type overloadResolution struct {
Reference *exprpb.Reference
Type *exprpb.Type
Type *types.Type
Reference *ast.ReferenceInfo
}
func newResolution(checkedRef *exprpb.Reference, t *exprpb.Type) *overloadResolution {
func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution {
return &overloadResolution{
Reference: checkedRef,
Reference: r,
Type: t,
}
}
@@ -697,10 +705,56 @@ func (c *checker) locationByID(id int64) common.Location {
return common.NoLocation
}
func newIdentReference(name string, value *exprpb.Constant) *exprpb.Reference {
return &exprpb.Reference{Name: name, Value: value}
func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) {
if _, found := c.env.provider.FindStructType(structType); !found {
// This should not happen, anyway, report an error.
c.errors.unexpectedFailedResolution(exprID, c.locationByID(exprID), structType)
return nil, false
}
if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found {
return ft.Type, found
}
c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName)
return nil, false
}
func newFunctionReference(overloads ...string) *exprpb.Reference {
return &exprpb.Reference{OverloadId: overloads}
func isWellKnownType(t *types.Type) bool {
switch t.Kind() {
case types.AnyKind, types.TimestampKind, types.DurationKind, types.DynKind, types.NullTypeKind:
return true
case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind:
return t.IsAssignableType(types.NullType)
case types.ListKind:
return t.Parameters()[0] == types.DynType
case types.MapKind:
return t.Parameters()[0] == types.StringType && t.Parameters()[1] == types.DynType
}
return false
}
func getWellKnownTypeName(t *types.Type) string {
if name, found := wellKnownTypes[t.Kind()]; found {
return name
}
return ""
}
var (
wellKnownTypes = map[types.Kind]string{
types.AnyKind: "google.protobuf.Any",
types.BoolKind: "google.protobuf.BoolValue",
types.BytesKind: "google.protobuf.BytesValue",
types.DoubleKind: "google.protobuf.DoubleValue",
types.DurationKind: "google.protobuf.Duration",
types.DynKind: "google.protobuf.Value",
types.IntKind: "google.protobuf.Int64Value",
types.ListKind: "google.protobuf.ListValue",
types.NullTypeKind: "google.protobuf.NullValue",
types.MapKind: "google.protobuf.Struct",
types.StringKind: "google.protobuf.StringValue",
types.TimestampKind: "google.protobuf.Timestamp",
types.UintKind: "google.protobuf.UInt64Value",
}
)

View File

@@ -18,7 +18,9 @@ import (
"math"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@@ -54,7 +56,7 @@ type AstNode interface {
// The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'.
Path() []string
// Type returns the deduced type of the AstNode.
Type() *exprpb.Type
Type() *types.Type
// Expr returns the expression of the AstNode.
Expr() *exprpb.Expr
// ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
@@ -66,7 +68,7 @@ type AstNode interface {
type astNode struct {
path []string
t *exprpb.Type
t *types.Type
expr *exprpb.Expr
derivedSize *SizeEstimate
}
@@ -75,7 +77,7 @@ func (e astNode) Path() []string {
return e.path
}
func (e astNode) Type() *exprpb.Type {
func (e astNode) Type() *types.Type {
return e.t
}
@@ -228,7 +230,7 @@ func addUint64NoOverflow(x, y uint64) uint64 {
// multiplyUint64NoOverflow multiplies non-negative ints. If the result exceeds math.MaxUint64, math.MaxUint64
// is returned.
func multiplyUint64NoOverflow(x, y uint64) uint64 {
if x > 0 && y > 0 && x > math.MaxUint64/y {
if y != 0 && x > math.MaxUint64/y {
return math.MaxUint64
}
return x * y
@@ -240,7 +242,11 @@ func multiplyByCostFactor(x uint64, y float64) uint64 {
if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y {
return math.MaxUint64
}
return uint64(math.Ceil(xFloat * y))
ceil := math.Ceil(xFloat * y)
if ceil >= doubleTwoTo64 {
return math.MaxUint64
}
return uint64(ceil)
}
var (
@@ -258,9 +264,10 @@ type coster struct {
// iterRanges tracks the iterRange of each iterVar.
iterRanges iterRangeScopes
// computedSizes tracks the computed sizes of call results.
computedSizes map[int64]SizeEstimate
checkedExpr *exprpb.CheckedExpr
estimator CostEstimator
computedSizes map[int64]SizeEstimate
checkedAST *ast.CheckedAST
estimator CostEstimator
overloadEstimators map[string]FunctionEstimator
// presenceTestCost will either be a zero or one based on whether has() macros count against cost computations.
presenceTestCost CostEstimate
}
@@ -289,6 +296,7 @@ func (vs iterRangeScopes) peek(varName string) (int64, bool) {
type CostOption func(*coster) error
// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
//
// Defaults to a presence test cost of one.
func PresenceTestHasCost(hasCost bool) CostOption {
return func(c *coster) error {
@@ -301,15 +309,30 @@ func PresenceTestHasCost(hasCost bool) CostOption {
}
}
// FunctionEstimator provides a CallEstimate given the target and arguments for a specific function, overload pair.
type FunctionEstimator func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate
// OverloadCostEstimate binds a FunctionEstimator to a specific function overload ID.
//
// When an OverloadCostEstimate is provided, it will override the cost calculation of the CostEstimator provided to
// the Cost() call.
func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) CostOption {
return func(c *coster) error {
c.overloadEstimators[overloadID] = functionCoster
return nil
}
}
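A consumer-side sketch of overriding the estimated cost for a single overload; checkedAST, baseEstimator, and the overload ID string are assumptions for illustration:

// Pin the chosen overload to a constant cost of 5, regardless of argument sizes.
fixedCost := func(estimator checker.CostEstimator, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
	return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 5, Max: 5}}
}
est, err := checker.Cost(checkedAST, baseEstimator,
	checker.OverloadCostEstimate("contains_string", fixedCost))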
// Cost estimates the cost of the parsed and type checked CEL expression.
func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
c := &coster{
checkedExpr: checker,
estimator: estimator,
exprPath: map[int64][]string{},
iterRanges: map[string][]int64{},
computedSizes: map[int64]SizeEstimate{},
presenceTestCost: CostEstimate{Min: 1, Max: 1},
checkedAST: checker,
estimator: estimator,
overloadEstimators: map[string]FunctionEstimator{},
exprPath: map[int64][]string{},
iterRanges: map[string][]int64{},
computedSizes: map[int64]SizeEstimate{},
presenceTestCost: CostEstimate{Min: 1, Max: 1},
}
for _, opt := range opts {
err := opt(c)
@@ -317,7 +340,7 @@ func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOpti
return CostEstimate{}, err
}
}
return c.cost(checker.GetExpr()), nil
return c.cost(checker.Expr), nil
}
func (c *coster) cost(e *exprpb.Expr) CostEstimate {
@@ -351,10 +374,10 @@ func (c *coster) costIdent(e *exprpb.Expr) CostEstimate {
// build and track the field path
if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok {
switch c.checkedExpr.TypeMap[iterRange].GetTypeKind().(type) {
case *exprpb.Type_ListType_:
switch c.checkedAST.TypeMap[iterRange].Kind() {
case types.ListKind:
c.addPath(e, append(c.exprPath[iterRange], "@items"))
case *exprpb.Type_MapType_:
case types.MapKind:
c.addPath(e, append(c.exprPath[iterRange], "@keys"))
}
} else {
@@ -378,8 +401,8 @@ func (c *coster) costSelect(e *exprpb.Expr) CostEstimate {
}
sum = sum.Add(c.cost(sel.GetOperand()))
targetType := c.getType(sel.GetOperand())
switch kindOf(targetType) {
case kindMap, kindObject, kindTypeParam:
switch targetType.Kind() {
case types.MapKind, types.StructKind, types.TypeParamKind:
sum = sum.Add(selectAndIdentCost)
}
@@ -403,8 +426,8 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
argTypes[i] = c.newAstNode(arg)
}
ref := c.checkedExpr.ReferenceMap[e.GetId()]
if ref == nil || len(ref.GetOverloadId()) == 0 {
ref := c.checkedAST.ReferenceMap[e.GetId()]
if ref == nil || len(ref.OverloadIDs) == 0 {
return CostEstimate{}
}
var targetType AstNode
@@ -417,7 +440,7 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate {
// Pick a cost estimate range that covers all the overload cost estimation ranges
fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0}
var resultSize *SizeEstimate
for _, overload := range ref.GetOverloadId() {
for _, overload := range ref.OverloadIDs {
overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts)
fnCost = fnCost.Union(overloadCost.CostEstimate)
if overloadCost.ResultSize != nil {
@@ -530,7 +553,14 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
}
return sum
}
if len(c.overloadEstimators) != 0 {
if estimator, found := c.overloadEstimators[overloadID]; found {
if est := estimator(c.estimator, target, args); est != nil {
callEst := *est
return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
}
}
}
if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil {
callEst := *est
return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
@@ -641,8 +671,8 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())}
}
func (c *coster) getType(e *exprpb.Expr) *exprpb.Type {
return c.checkedExpr.TypeMap[e.GetId()]
func (c *coster) getType(e *exprpb.Expr) *types.Type {
return c.checkedAST.TypeMap[e.GetId()]
}
func (c *coster) getPath(e *exprpb.Expr) []string {
@@ -663,22 +693,24 @@ func (c *coster) newAstNode(e *exprpb.Expr) *astNode {
if size, ok := c.computedSizes[e.GetId()]; ok {
derivedSize = &size
}
return &astNode{path: path, t: c.getType(e), expr: e, derivedSize: derivedSize}
return &astNode{
path: path,
t: c.getType(e),
expr: e,
derivedSize: derivedSize}
}
// isScalar returns true if the given type is known to be of a constant size at
// compile time. isScalar will return false for strings (they are variable-width)
// in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time).
func isScalar(t *exprpb.Type) bool {
switch kindOf(t) {
case kindPrimitive:
if t.GetPrimitive() != exprpb.Type_STRING && t.GetPrimitive() != exprpb.Type_BYTES {
return true
}
case kindWellKnown:
if t.GetWellKnown() == exprpb.Type_DURATION || t.GetWellKnown() == exprpb.Type_TIMESTAMP {
return true
}
func isScalar(t *types.Type) bool {
switch t.Kind() {
case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind:
return true
}
return false
}
var (
doubleTwoTo64 = math.Ldexp(1.0, 64)
)

View File

@@ -9,7 +9,6 @@ go_library(
name = "go_default_library",
srcs = [
"decls.go",
"scopes.go",
],
importpath = "github.com/google/cel-go/checker/decls",
deps = [

View File

@@ -18,17 +18,11 @@ import (
"fmt"
"strings"
"google.golang.org/protobuf/proto"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/pb"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
type aggregateLiteralElementType int
@@ -76,15 +70,15 @@ var (
// which can be used to assist with type-checking.
type Env struct {
container *containers.Container
provider ref.TypeProvider
declarations *decls.Scopes
provider types.Provider
declarations *Scopes
aggLitElemType aggregateLiteralElementType
filteredOverloadIDs map[string]struct{}
}
// NewEnv returns a new *Env with the given parameters.
func NewEnv(container *containers.Container, provider ref.TypeProvider, opts ...Option) (*Env, error) {
declarations := decls.NewScopes()
func NewEnv(container *containers.Container, provider types.Provider, opts ...Option) (*Env, error) {
declarations := newScopes()
declarations.Push()
envOptions := &options{}
@@ -113,24 +107,31 @@ func NewEnv(container *containers.Container, provider ref.TypeProvider, opts ...
}, nil
}
// Add adds new Decl protos to the Env.
// Returns an error for identifier redeclarations.
func (e *Env) Add(decls ...*exprpb.Decl) error {
// AddIdents configures the checker with a list of variable declarations.
//
// If there are overlapping declarations, the method will error.
func (e *Env) AddIdents(declarations ...*decls.VariableDecl) error {
errMsgs := make([]errorMsg, 0)
for _, decl := range decls {
switch decl.DeclKind.(type) {
case *exprpb.Decl_Ident:
errMsgs = append(errMsgs, e.addIdent(sanitizeIdent(decl)))
case *exprpb.Decl_Function:
errMsgs = append(errMsgs, e.setFunction(sanitizeFunction(decl))...)
}
for _, d := range declarations {
errMsgs = append(errMsgs, e.addIdent(d))
}
return formatError(errMsgs)
}
// AddFunctions configures the checker with a list of function declarations.
//
// If there are overlapping declarations, the method will error.
func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error {
errMsgs := make([]errorMsg, 0)
for _, d := range declarations {
errMsgs = append(errMsgs, e.setFunction(d)...)
}
return formatError(errMsgs)
}
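
As a hypothetical sketch (not from this diff) of the new AddIdents entry point, the helper below declares two illustrative variables on a fresh Env. It is written as if inside the checker package and assumes containers.DefaultContainer and types.NewRegistry() behave as in current cel-go; the variable names are made up for illustration:

// newExampleEnv builds an Env and registers two illustrative variables.
func newExampleEnv() (*Env, error) {
	reg, err := types.NewRegistry()
	if err != nil {
		return nil, err
	}
	env, err := NewEnv(containers.DefaultContainer, reg)
	if err != nil {
		return nil, err
	}
	// Overlapping declarations would surface here as an aggregated error.
	if err := env.AddIdents(
		decls.NewVariable("request_id", types.StringType),
		decls.NewVariable("attempt", types.IntType),
	); err != nil {
		return nil, err
	}
	return env, nil
}
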
// LookupIdent returns a Decl proto for typeName as an identifier in the Env.
// Returns nil if no such identifier is found in the Env.
func (e *Env) LookupIdent(name string) *exprpb.Decl {
func (e *Env) LookupIdent(name string) *decls.VariableDecl {
for _, candidate := range e.container.ResolveCandidateNames(name) {
if ident := e.declarations.FindIdent(candidate); ident != nil {
return ident
@@ -139,8 +140,8 @@ func (e *Env) LookupIdent(name string) *exprpb.Decl {
// Next try to import the name as a reference to a message type. If found,
	// the declaration is added to the outermost (global) scope of the
// environment, so next time we can access it faster.
if t, found := e.provider.FindType(candidate); found {
decl := decls.NewVar(candidate, t)
if t, found := e.provider.FindStructType(candidate); found {
decl := decls.NewVariable(candidate, t)
e.declarations.AddIdent(decl)
return decl
}
@@ -148,11 +149,7 @@ func (e *Env) LookupIdent(name string) *exprpb.Decl {
// Next try to import this as an enum value by splitting the name in a type prefix and
// the enum inside.
if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
decl := decls.NewIdent(candidate,
decls.Int,
&exprpb.Constant{
ConstantKind: &exprpb.Constant_Int64Value{
Int64Value: int64(enumValue.(types.Int))}})
decl := decls.NewConstant(candidate, types.IntType, enumValue)
e.declarations.AddIdent(decl)
return decl
}
@@ -162,7 +159,7 @@ func (e *Env) LookupIdent(name string) *exprpb.Decl {
// LookupFunction returns a Decl proto for typeName as a function in env.
// Returns nil if no such function is found in env.
func (e *Env) LookupFunction(name string) *exprpb.Decl {
func (e *Env) LookupFunction(name string) *decls.FunctionDecl {
for _, candidate := range e.container.ResolveCandidateNames(name) {
if fn := e.declarations.FindFunction(candidate); fn != nil {
return fn
@@ -171,88 +168,46 @@ func (e *Env) LookupFunction(name string) *exprpb.Decl {
return nil
}
// addOverload adds overload to function declaration f.
// Returns one or more errorMsg values if the overload overlaps with an existing overload or macro.
func (e *Env) addOverload(f *exprpb.Decl, overload *exprpb.Decl_FunctionDecl_Overload) []errorMsg {
errMsgs := make([]errorMsg, 0)
function := f.GetFunction()
emptyMappings := newMapping()
overloadFunction := decls.NewFunctionType(overload.GetResultType(),
overload.GetParams()...)
overloadErased := substitute(emptyMappings, overloadFunction, true)
for _, existing := range function.GetOverloads() {
existingFunction := decls.NewFunctionType(existing.GetResultType(), existing.GetParams()...)
existingErased := substitute(emptyMappings, existingFunction, true)
overlap := isAssignable(emptyMappings, overloadErased, existingErased) != nil ||
isAssignable(emptyMappings, existingErased, overloadErased) != nil
if overlap &&
overload.GetIsInstanceFunction() == existing.GetIsInstanceFunction() {
errMsgs = append(errMsgs,
overlappingOverloadError(f.Name,
overload.GetOverloadId(), overloadFunction,
existing.GetOverloadId(), existingFunction))
}
}
for _, macro := range parser.AllMacros {
if macro.Function() == f.Name &&
macro.IsReceiverStyle() == overload.GetIsInstanceFunction() &&
macro.ArgCount() == len(overload.GetParams()) {
errMsgs = append(errMsgs, overlappingMacroError(f.Name, macro.ArgCount()))
}
}
if len(errMsgs) > 0 {
return errMsgs
}
function.Overloads = append(function.GetOverloads(), overload)
return errMsgs
}
// setFunction adds the function Decl to the Env.
// Adds a function decl if one doesn't already exist, then adds all overloads from the Decl.
// If overload overlaps with an existing overload, adds to the errors in the Env instead.
func (e *Env) setFunction(decl *exprpb.Decl) []errorMsg {
errorMsgs := make([]errorMsg, 0)
overloads := decl.GetFunction().GetOverloads()
current := e.declarations.FindFunction(decl.Name)
if current == nil {
//Add the function declaration without overloads and check the overloads below.
current = decls.NewFunction(decl.Name)
} else {
existingOverloads := map[string]*exprpb.Decl_FunctionDecl_Overload{}
for _, overload := range current.GetFunction().GetOverloads() {
existingOverloads[overload.GetOverloadId()] = overload
func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg {
errMsgs := make([]errorMsg, 0)
current := e.declarations.FindFunction(fn.Name())
if current != nil {
var err error
current, err = current.Merge(fn)
if err != nil {
return append(errMsgs, errorMsg(err.Error()))
}
newOverloads := []*exprpb.Decl_FunctionDecl_Overload{}
for _, overload := range overloads {
existing, found := existingOverloads[overload.GetOverloadId()]
if !found || !overloadsEqual(existing, overload) {
newOverloads = append(newOverloads, overload)
} else {
current = fn
}
for _, overload := range current.OverloadDecls() {
for _, macro := range parser.AllMacros {
if macro.Function() == current.Name() &&
macro.IsReceiverStyle() == overload.IsMemberFunction() &&
macro.ArgCount() == len(overload.ArgTypes()) {
errMsgs = append(errMsgs, overlappingMacroError(current.Name(), macro.ArgCount()))
}
}
overloads = newOverloads
if len(newOverloads) == 0 {
return errorMsgs
if len(errMsgs) > 0 {
return errMsgs
}
// Copy on write since we don't know where this original definition came from.
current = proto.Clone(current).(*exprpb.Decl)
}
e.declarations.SetFunction(current)
for _, overload := range overloads {
errorMsgs = append(errorMsgs, e.addOverload(current, overload)...)
}
return errorMsgs
return errMsgs
}
// addIdent adds the Decl to the declarations in the Env.
// Returns a non-empty errorMsg if the identifier is already declared in the scope.
func (e *Env) addIdent(decl *exprpb.Decl) errorMsg {
current := e.declarations.FindIdentInScope(decl.Name)
func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg {
current := e.declarations.FindIdentInScope(decl.Name())
if current != nil {
if proto.Equal(current, decl) {
if current.DeclarationIsEquivalent(decl) {
return ""
}
return overlappingIdentifierError(decl.Name)
return overlappingIdentifierError(decl.Name())
}
e.declarations.AddIdent(decl)
return ""
@@ -264,111 +219,9 @@ func (e *Env) isOverloadDisabled(overloadID string) bool {
return found
}
// overloadsEqual returns whether two overloads have identical signatures.
//
// type parameter names are ignored as they may be specified in any order and have no bearing on overload
// equivalence
func overloadsEqual(o1, o2 *exprpb.Decl_FunctionDecl_Overload) bool {
return o1.GetOverloadId() == o2.GetOverloadId() &&
o1.GetIsInstanceFunction() == o2.GetIsInstanceFunction() &&
paramsEqual(o1.GetParams(), o2.GetParams()) &&
proto.Equal(o1.GetResultType(), o2.GetResultType())
}
// paramsEqual returns whether two lists have equal length and all types are equal
func paramsEqual(p1, p2 []*exprpb.Type) bool {
if len(p1) != len(p2) {
return false
}
for i, a := range p1 {
b := p2[i]
if !proto.Equal(a, b) {
return false
}
}
return true
}
// sanitizeFunction replaces well-known types referenced by message name with their equivalent
// CEL built-in type instances.
func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl {
fn := decl.GetFunction()
// Determine whether the declaration requires replacements from proto-based message type
// references to well-known CEL type references.
var needsSanitizing bool
for _, o := range fn.GetOverloads() {
if isObjectWellKnownType(o.GetResultType()) {
needsSanitizing = true
break
}
for _, p := range o.GetParams() {
if isObjectWellKnownType(p) {
needsSanitizing = true
break
}
}
}
// Early return if the declaration requires no modification.
if !needsSanitizing {
return decl
}
// Sanitize all of the overloads if any overload requires an update to its type references.
overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(fn.GetOverloads()))
for i, o := range fn.GetOverloads() {
rt := o.GetResultType()
if isObjectWellKnownType(rt) {
rt = getObjectWellKnownType(rt)
}
params := make([]*exprpb.Type, len(o.GetParams()))
copy(params, o.GetParams())
for j, p := range params {
if isObjectWellKnownType(p) {
params[j] = getObjectWellKnownType(p)
}
}
// If sanitized, replace the overload definition.
if o.IsInstanceFunction {
overloads[i] =
decls.NewInstanceOverload(o.GetOverloadId(), params, rt)
} else {
overloads[i] =
decls.NewOverload(o.GetOverloadId(), params, rt)
}
}
return decls.NewFunction(decl.GetName(), overloads...)
}
// sanitizeIdent replaces the identifier's well-known types referenced by message name with
// references to CEL built-in type instances.
func sanitizeIdent(decl *exprpb.Decl) *exprpb.Decl {
id := decl.GetIdent()
t := id.GetType()
if !isObjectWellKnownType(t) {
return decl
}
return decls.NewIdent(decl.GetName(), getObjectWellKnownType(t), id.GetValue())
}
// isObjectWellKnownType returns true if the input type is an OBJECT type with a message name
// that corresponds the message name of a built-in CEL type.
func isObjectWellKnownType(t *exprpb.Type) bool {
if kindOf(t) != kindObject {
return false
}
_, found := pb.CheckedWellKnowns[t.GetMessageType()]
return found
}
// getObjectWellKnownType returns the built-in CEL type declaration for input type's message name.
func getObjectWellKnownType(t *exprpb.Type) *exprpb.Type {
return pb.CheckedWellKnowns[t.GetMessageType()]
}
// validatedDeclarations returns a reference to the validated variable and function declaration scope stack.
// must be copied before use.
func (e *Env) validatedDeclarations() *decls.Scopes {
func (e *Env) validatedDeclarations() *Scopes {
return e.declarations
}
@@ -402,19 +255,6 @@ func overlappingIdentifierError(name string) errorMsg {
return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name))
}
func overlappingOverloadError(name string,
overloadID1 string, f1 *exprpb.Type,
overloadID2 string, f2 *exprpb.Type) errorMsg {
return errorMsg(fmt.Sprintf(
"overlapping overload for name '%s' (type '%s' with overloadId: '%s' "+
"cannot be distinguished from '%s' with overloadId: '%s')",
name,
FormatCheckedType(f1),
overloadID1,
FormatCheckedType(f2),
overloadID2))
}
func overlappingMacroError(name string, argCount int) errorMsg {
return errorMsg(fmt.Sprintf(
"overlapping macro for name '%s' with %d args", name, argCount))

View File

@@ -15,82 +15,78 @@
package checker
import (
"reflect"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// typeErrors is a specialization of Errors.
type typeErrors struct {
*common.Errors
errs *common.Errors
}
func (e *typeErrors) undeclaredReference(l common.Location, container string, name string) {
e.ReportError(l, "undeclared reference to '%s' (in container '%s')", name, container)
func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, field, value *types.Type) {
e.errs.ReportErrorAtID(id, l, "expected type of field '%s' is '%s' but provided type is '%s'",
name, FormatCELType(field), FormatCELType(value))
}
func (e *typeErrors) typeDoesNotSupportFieldSelection(l common.Location, t *exprpb.Type) {
e.ReportError(l, "type '%s' does not support field selection", t)
func (e *typeErrors) incompatibleType(id int64, l common.Location, ex *exprpb.Expr, prev, next *types.Type) {
e.errs.ReportErrorAtID(id, l,
"incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
}
func (e *typeErrors) undefinedField(l common.Location, field string) {
e.ReportError(l, "undefined field '%s'", field)
func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) {
signature := formatFunctionDeclType(nil, args, isInstance)
e.errs.ReportErrorAtID(id, l, "found no matching overload for '%s' applied to '%s'", name, signature)
}
func (e *typeErrors) noMatchingOverload(l common.Location, name string, args []*exprpb.Type, isInstance bool) {
signature := formatFunction(nil, args, isInstance)
e.ReportError(l, "found no matching overload for '%s' applied to '%s'", name, signature)
func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *types.Type) {
e.errs.ReportErrorAtID(id, l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)",
FormatCELType(t))
}
func (e *typeErrors) notAType(l common.Location, t *exprpb.Type) {
e.ReportError(l, "'%s(%v)' is not a type", FormatCheckedType(t), t)
func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field *exprpb.Expr) {
e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field)
}
func (e *typeErrors) notAMessageType(l common.Location, t *exprpb.Type) {
e.ReportError(l, "'%s' is not a message type", FormatCheckedType(t))
func (e *typeErrors) notAType(id int64, l common.Location, typeName string) {
e.errs.ReportErrorAtID(id, l, "'%s' is not a type", typeName)
}
func (e *typeErrors) fieldTypeMismatch(l common.Location, name string, field *exprpb.Type, value *exprpb.Type) {
e.ReportError(l, "expected type of field '%s' is '%s' but provided type is '%s'",
name, FormatCheckedType(field), FormatCheckedType(value))
func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName string) {
e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName)
}
func (e *typeErrors) unexpectedFailedResolution(l common.Location, typeName string) {
e.ReportError(l, "[internal] unexpected failed resolution of '%s'", typeName)
func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex *exprpb.Expr, prev, next *ast.ReferenceInfo) {
e.errs.ReportErrorAtID(id, l,
"reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next)
}
func (e *typeErrors) notAComprehensionRange(l common.Location, t *exprpb.Type) {
e.ReportError(l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)",
FormatCheckedType(t))
func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) {
e.errs.ReportErrorAtID(id, l, "type '%s' does not support field selection", FormatCELType(t))
}
func (e *typeErrors) typeMismatch(l common.Location, expected *exprpb.Type, actual *exprpb.Type) {
e.ReportError(l, "expected type '%s' but found '%s'",
FormatCheckedType(expected), FormatCheckedType(actual))
func (e *typeErrors) typeMismatch(id int64, l common.Location, expected, actual *types.Type) {
e.errs.ReportErrorAtID(id, l, "expected type '%s' but found '%s'",
FormatCELType(expected), FormatCELType(actual))
}
func formatFunction(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string {
result := ""
if isInstance {
target := argTypes[0]
argTypes = argTypes[1:]
result += FormatCheckedType(target)
result += "."
}
result += "("
for i, arg := range argTypes {
if i > 0 {
result += ", "
}
result += FormatCheckedType(arg)
}
result += ")"
if resultType != nil {
result += " -> "
result += FormatCheckedType(resultType)
}
return result
func (e *typeErrors) undefinedField(id int64, l common.Location, field string) {
e.errs.ReportErrorAtID(id, l, "undefined field '%s'", field)
}
func (e *typeErrors) undeclaredReference(id int64, l common.Location, container string, name string) {
e.errs.ReportErrorAtID(id, l, "undeclared reference to '%s' (in container '%s')", name, container)
}
func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typeName string) {
e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName)
}
func (e *typeErrors) unexpectedASTType(id int64, l common.Location, ex *exprpb.Expr) {
e.errs.ReportErrorAtID(id, l, "unrecognized ast type: %v", reflect.TypeOf(ex))
}

vendor/github.com/google/cel-go/checker/format.go (generated vendored, new file, 216 lines)
View File

@@ -0,0 +1,216 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package checker
import (
"fmt"
"strings"
chkdecls "github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
const (
kindUnknown = iota + 1
kindError
kindFunction
kindDyn
kindPrimitive
kindWellKnown
kindWrapper
kindNull
kindAbstract
kindType
kindList
kindMap
kindObject
kindTypeParam
)
// FormatCheckedType converts a type message into a string representation.
func FormatCheckedType(t *exprpb.Type) string {
switch kindOf(t) {
case kindDyn:
return "dyn"
case kindFunction:
return formatFunctionExprType(t.GetFunction().GetResultType(),
t.GetFunction().GetArgTypes(),
false)
case kindList:
return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType()))
case kindObject:
return t.GetMessageType()
case kindMap:
return fmt.Sprintf("map(%s, %s)",
FormatCheckedType(t.GetMapType().GetKeyType()),
FormatCheckedType(t.GetMapType().GetValueType()))
case kindNull:
return "null"
case kindPrimitive:
switch t.GetPrimitive() {
case exprpb.Type_UINT64:
return "uint"
case exprpb.Type_INT64:
return "int"
}
return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
case kindType:
if t.GetType() == nil || t.GetType().GetTypeKind() == nil {
return "type"
}
return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
case kindWellKnown:
switch t.GetWellKnown() {
case exprpb.Type_ANY:
return "any"
case exprpb.Type_DURATION:
return "duration"
case exprpb.Type_TIMESTAMP:
return "timestamp"
}
case kindWrapper:
return fmt.Sprintf("wrapper(%s)",
FormatCheckedType(chkdecls.NewPrimitiveType(t.GetWrapper())))
case kindError:
return "!error!"
case kindTypeParam:
return t.GetTypeParam()
case kindAbstract:
at := t.GetAbstractType()
params := at.GetParameterTypes()
paramStrs := make([]string, len(params))
for i, p := range params {
paramStrs[i] = FormatCheckedType(p)
}
return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
}
return t.String()
}
type formatter func(any) string
// FormatCELType formats a types.Type value to a string representation.
//
// The type formatting is identical to FormatCheckedType.
func FormatCELType(t any) string {
dt := t.(*types.Type)
switch dt.Kind() {
case types.AnyKind:
return "any"
case types.DurationKind:
return "duration"
case types.ErrorKind:
return "!error!"
case types.NullTypeKind:
return "null"
case types.TimestampKind:
return "timestamp"
case types.TypeParamKind:
return dt.TypeName()
case types.OpaqueKind:
if dt.TypeName() == "function" {
// There is no explicit function type in the new types representation, so information like
// whether the function is a member function is absent.
return formatFunctionDeclType(dt.Parameters()[0], dt.Parameters()[1:], false)
}
case types.UnspecifiedKind:
return ""
}
if len(dt.Parameters()) == 0 {
return dt.DeclaredTypeName()
}
paramTypeNames := make([]string, 0, len(dt.Parameters()))
for _, p := range dt.Parameters() {
paramTypeNames = append(paramTypeNames, FormatCELType(p))
}
return fmt.Sprintf("%s(%s)", dt.TypeName(), strings.Join(paramTypeNames, ", "))
}
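
A small, hypothetical illustration of the output this formatter produces for a parameterized type; the helper name is made up, and the expected string follows from the list/map branches above, assuming types.NewMapType and types.NewListType construct the corresponding *types.Type values:

// formatExample would return "map(string, list(int))".
func formatExample() string {
	t := types.NewMapType(types.StringType, types.NewListType(types.IntType))
	return FormatCELType(t)
}
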
func formatExprType(t any) string {
if t == nil {
return ""
}
return FormatCheckedType(t.(*exprpb.Type))
}
func formatFunctionExprType(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string {
return formatFunctionInternal[*exprpb.Type](resultType, argTypes, isInstance, formatExprType)
}
func formatFunctionDeclType(resultType *types.Type, argTypes []*types.Type, isInstance bool) string {
return formatFunctionInternal[*types.Type](resultType, argTypes, isInstance, FormatCELType)
}
func formatFunctionInternal[T any](resultType T, argTypes []T, isInstance bool, format formatter) string {
result := ""
if isInstance {
target := argTypes[0]
argTypes = argTypes[1:]
result += format(target)
result += "."
}
result += "("
for i, arg := range argTypes {
if i > 0 {
result += ", "
}
result += format(arg)
}
result += ")"
rt := format(resultType)
if rt != "" {
result += " -> "
result += rt
}
return result
}
// kindOf returns the kind of the type as defined in the checked.proto.
func kindOf(t *exprpb.Type) int {
if t == nil || t.TypeKind == nil {
return kindUnknown
}
switch t.GetTypeKind().(type) {
case *exprpb.Type_Error:
return kindError
case *exprpb.Type_Function:
return kindFunction
case *exprpb.Type_Dyn:
return kindDyn
case *exprpb.Type_Primitive:
return kindPrimitive
case *exprpb.Type_WellKnown:
return kindWellKnown
case *exprpb.Type_Wrapper:
return kindWrapper
case *exprpb.Type_Null:
return kindNull
case *exprpb.Type_Type:
return kindType
case *exprpb.Type_ListType_:
return kindList
case *exprpb.Type_MapType_:
return kindMap
case *exprpb.Type_MessageType:
return kindObject
case *exprpb.Type_TypeParam:
return kindTypeParam
case *exprpb.Type_AbstractType_:
return kindAbstract
}
return kindUnknown
}

View File

@@ -15,25 +15,25 @@
package checker
import (
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
"github.com/google/cel-go/common/types"
)
type mapping struct {
mapping map[string]*exprpb.Type
mapping map[string]*types.Type
}
func newMapping() *mapping {
return &mapping{
mapping: make(map[string]*exprpb.Type),
mapping: make(map[string]*types.Type),
}
}
func (m *mapping) add(from *exprpb.Type, to *exprpb.Type) {
m.mapping[typeKey(from)] = to
func (m *mapping) add(from, to *types.Type) {
m.mapping[FormatCELType(from)] = to
}
func (m *mapping) find(from *exprpb.Type) (*exprpb.Type, bool) {
if r, found := m.mapping[typeKey(from)]; found {
func (m *mapping) find(from *types.Type) (*types.Type, bool) {
if r, found := m.mapping[FormatCELType(from)]; found {
return r, found
}
return nil, false

View File

@@ -14,12 +14,10 @@
package checker
import "github.com/google/cel-go/checker/decls"
type options struct {
crossTypeNumericComparisons bool
homogeneousAggregateLiterals bool
validatedDeclarations *decls.Scopes
validatedDeclarations *Scopes
}
// Option is a functional option for configuring the type-checker
@@ -34,15 +32,6 @@ func CrossTypeNumericComparisons(enabled bool) Option {
}
}
// HomogeneousAggregateLiterals toggles support for constructing lists and maps whose elements all
// have the same type.
func HomogeneousAggregateLiterals(enabled bool) Option {
return func(opts *options) error {
opts.homogeneousAggregateLiterals = enabled
return nil
}
}
// ValidatedDeclarations provides a references to validated declarations which will be copied
// into new checker instances.
func ValidatedDeclarations(env *Env) Option {

View File

@@ -15,6 +15,8 @@
package checker
import (
"sort"
"github.com/google/cel-go/common/debug"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
@@ -47,6 +49,7 @@ func (a *semanticAdorner) GetMetadata(elem any) string {
if len(ref.GetOverloadId()) == 0 {
result += "^" + ref.Name
} else {
sort.Strings(ref.GetOverloadId())
for i, overload := range ref.GetOverloadId() {
if i == 0 {
result += "^"

View File

@@ -12,9 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package decls
package checker
import exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
import (
"github.com/google/cel-go/common/decls"
)
// Scopes represents nested Decl sets where the Scopes value contains a Groups containing all
// identifiers in scope and an optional parent representing outer scopes.
@@ -25,9 +27,9 @@ type Scopes struct {
scopes *Group
}
// NewScopes creates a new, empty Scopes.
// newScopes creates a new, empty Scopes.
// Some operations can't be safely performed until a Group is added with Push.
func NewScopes() *Scopes {
func newScopes() *Scopes {
return &Scopes{
scopes: newGroup(),
}
@@ -35,7 +37,7 @@ func NewScopes() *Scopes {
// Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil.
func (s *Scopes) Copy() *Scopes {
cpy := NewScopes()
cpy := newScopes()
if s == nil {
return cpy
}
@@ -66,14 +68,14 @@ func (s *Scopes) Pop() *Scopes {
// AddIdent adds the ident Decl in the current scope.
// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten.
func (s *Scopes) AddIdent(decl *exprpb.Decl) {
s.scopes.idents[decl.Name] = decl
func (s *Scopes) AddIdent(decl *decls.VariableDecl) {
s.scopes.idents[decl.Name()] = decl
}
// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be
// found.
// Note: The search is performed from innermost to outermost.
func (s *Scopes) FindIdent(name string) *exprpb.Decl {
func (s *Scopes) FindIdent(name string) *decls.VariableDecl {
if ident, found := s.scopes.idents[name]; found {
return ident
}
@@ -86,7 +88,7 @@ func (s *Scopes) FindIdent(name string) *exprpb.Decl {
// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or
// nil if one does not exist.
// Note: The search is only performed on the current scope and does not search outer scopes.
func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl {
func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl {
if ident, found := s.scopes.idents[name]; found {
return ident
}
@@ -95,14 +97,14 @@ func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl {
// SetFunction adds the function Decl to the current scope.
// Note: Any previous entry for a function in the current scope with the same name is overwritten.
func (s *Scopes) SetFunction(fn *exprpb.Decl) {
s.scopes.functions[fn.Name] = fn
func (s *Scopes) SetFunction(fn *decls.FunctionDecl) {
s.scopes.functions[fn.Name()] = fn
}
// FindFunction finds the first function Decl with a matching name in Scopes.
// The search is performed from innermost to outermost.
// Returns nil if no such function in Scopes.
func (s *Scopes) FindFunction(name string) *exprpb.Decl {
func (s *Scopes) FindFunction(name string) *decls.FunctionDecl {
if fn, found := s.scopes.functions[name]; found {
return fn
}
@@ -116,16 +118,16 @@ func (s *Scopes) FindFunction(name string) *exprpb.Decl {
// Contains separate namespaces for identifier and function Decls.
// (Should be named "Scope" perhaps?)
type Group struct {
idents map[string]*exprpb.Decl
functions map[string]*exprpb.Decl
idents map[string]*decls.VariableDecl
functions map[string]*decls.FunctionDecl
}
// copy creates a new Group instance with a shallow copy of the variables and functions.
// If callers need to mutate the exprpb.Decl definitions for a Function, they should copy-on-write.
func (g *Group) copy() *Group {
cpy := &Group{
idents: make(map[string]*exprpb.Decl, len(g.idents)),
functions: make(map[string]*exprpb.Decl, len(g.functions)),
idents: make(map[string]*decls.VariableDecl, len(g.idents)),
functions: make(map[string]*decls.FunctionDecl, len(g.functions)),
}
for n, id := range g.idents {
cpy.idents[n] = id
@@ -139,7 +141,7 @@ func (g *Group) copy() *Group {
// newGroup creates a new Group with empty maps for identifiers and functions.
func newGroup() *Group {
return &Group{
idents: make(map[string]*exprpb.Decl),
functions: make(map[string]*exprpb.Decl),
idents: make(map[string]*decls.VariableDecl),
functions: make(map[string]*decls.FunctionDecl),
}
}
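
A brief, hypothetical sketch of the scope API with the new decls types; it assumes the common/types package is imported alongside decls, and the variable name "x" is purely illustrative:

// scopesLookup registers one variable and resolves it again. FindIdent searches
// from the innermost scope outward, so this returns a non-nil declaration.
func scopesLookup() *decls.VariableDecl {
	s := newScopes()
	s.AddIdent(decls.NewVariable("x", types.IntType))
	return s.FindIdent("x")
}
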

View File

@@ -15,480 +15,21 @@
package checker
import (
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/stdlib"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
var (
standardDeclarations []*exprpb.Decl
)
func init() {
// Some shortcuts we use when building declarations.
paramA := decls.NewTypeParamType("A")
typeParamAList := []string{"A"}
listOfA := decls.NewListType(paramA)
paramB := decls.NewTypeParamType("B")
typeParamABList := []string{"A", "B"}
mapOfAB := decls.NewMapType(paramA, paramB)
var idents []*exprpb.Decl
for _, t := range []*exprpb.Type{
decls.Int, decls.Uint, decls.Bool,
decls.Double, decls.Bytes, decls.String} {
idents = append(idents,
decls.NewVar(FormatCheckedType(t), decls.NewTypeType(t)))
}
idents = append(idents,
decls.NewVar("list", decls.NewTypeType(listOfA)),
decls.NewVar("map", decls.NewTypeType(mapOfAB)),
decls.NewVar("null_type", decls.NewTypeType(decls.Null)),
decls.NewVar("type", decls.NewTypeType(decls.NewTypeType(nil))))
standardDeclarations = append(standardDeclarations, idents...)
standardDeclarations = append(standardDeclarations, []*exprpb.Decl{
// Booleans
decls.NewFunction(operators.Conditional,
decls.NewParameterizedOverload(overloads.Conditional,
[]*exprpb.Type{decls.Bool, paramA, paramA}, paramA,
typeParamAList)),
decls.NewFunction(operators.LogicalAnd,
decls.NewOverload(overloads.LogicalAnd,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
decls.NewFunction(operators.LogicalOr,
decls.NewOverload(overloads.LogicalOr,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
decls.NewFunction(operators.LogicalNot,
decls.NewOverload(overloads.LogicalNot,
[]*exprpb.Type{decls.Bool}, decls.Bool)),
decls.NewFunction(operators.NotStrictlyFalse,
decls.NewOverload(overloads.NotStrictlyFalse,
[]*exprpb.Type{decls.Bool}, decls.Bool)),
decls.NewFunction(operators.Equals,
decls.NewParameterizedOverload(overloads.Equals,
[]*exprpb.Type{paramA, paramA}, decls.Bool,
typeParamAList)),
decls.NewFunction(operators.NotEquals,
decls.NewParameterizedOverload(overloads.NotEquals,
[]*exprpb.Type{paramA, paramA}, decls.Bool,
typeParamAList)),
// Algebra.
decls.NewFunction(operators.Subtract,
decls.NewOverload(overloads.SubtractInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.SubtractUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
decls.NewOverload(overloads.SubtractDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Double),
decls.NewOverload(overloads.SubtractTimestampTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Duration),
decls.NewOverload(overloads.SubtractTimestampDuration,
[]*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
decls.NewOverload(overloads.SubtractDurationDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
decls.NewFunction(operators.Multiply,
decls.NewOverload(overloads.MultiplyInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.MultiplyUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
decls.NewOverload(overloads.MultiplyDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
decls.NewFunction(operators.Divide,
decls.NewOverload(overloads.DivideInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.DivideUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
decls.NewOverload(overloads.DivideDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
decls.NewFunction(operators.Modulo,
decls.NewOverload(overloads.ModuloInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.ModuloUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint)),
decls.NewFunction(operators.Add,
decls.NewOverload(overloads.AddInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
decls.NewOverload(overloads.AddUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
decls.NewOverload(overloads.AddDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Double),
decls.NewOverload(overloads.AddString,
[]*exprpb.Type{decls.String, decls.String}, decls.String),
decls.NewOverload(overloads.AddBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bytes),
decls.NewParameterizedOverload(overloads.AddList,
[]*exprpb.Type{listOfA, listOfA}, listOfA,
typeParamAList),
decls.NewOverload(overloads.AddTimestampDuration,
[]*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
decls.NewOverload(overloads.AddDurationTimestamp,
[]*exprpb.Type{decls.Duration, decls.Timestamp}, decls.Timestamp),
decls.NewOverload(overloads.AddDurationDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
decls.NewFunction(operators.Negate,
decls.NewOverload(overloads.NegateInt64,
[]*exprpb.Type{decls.Int}, decls.Int),
decls.NewOverload(overloads.NegateDouble,
[]*exprpb.Type{decls.Double}, decls.Double)),
// Index.
decls.NewFunction(operators.Index,
decls.NewParameterizedOverload(overloads.IndexList,
[]*exprpb.Type{listOfA, decls.Int}, paramA,
typeParamAList),
decls.NewParameterizedOverload(overloads.IndexMap,
[]*exprpb.Type{mapOfAB, paramA}, paramB,
typeParamABList)),
// Collections.
decls.NewFunction(overloads.Size,
decls.NewInstanceOverload(overloads.SizeStringInst,
[]*exprpb.Type{decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.SizeBytesInst,
[]*exprpb.Type{decls.Bytes}, decls.Int),
decls.NewParameterizedInstanceOverload(overloads.SizeListInst,
[]*exprpb.Type{listOfA}, decls.Int, typeParamAList),
decls.NewParameterizedInstanceOverload(overloads.SizeMapInst,
[]*exprpb.Type{mapOfAB}, decls.Int, typeParamABList),
decls.NewOverload(overloads.SizeString,
[]*exprpb.Type{decls.String}, decls.Int),
decls.NewOverload(overloads.SizeBytes,
[]*exprpb.Type{decls.Bytes}, decls.Int),
decls.NewParameterizedOverload(overloads.SizeList,
[]*exprpb.Type{listOfA}, decls.Int, typeParamAList),
decls.NewParameterizedOverload(overloads.SizeMap,
[]*exprpb.Type{mapOfAB}, decls.Int, typeParamABList)),
decls.NewFunction(operators.In,
decls.NewParameterizedOverload(overloads.InList,
[]*exprpb.Type{paramA, listOfA}, decls.Bool,
typeParamAList),
decls.NewParameterizedOverload(overloads.InMap,
[]*exprpb.Type{paramA, mapOfAB}, decls.Bool,
typeParamABList)),
// Deprecated 'in()' function.
decls.NewFunction(overloads.DeprecatedIn,
decls.NewParameterizedOverload(overloads.InList,
[]*exprpb.Type{paramA, listOfA}, decls.Bool,
typeParamAList),
decls.NewParameterizedOverload(overloads.InMap,
[]*exprpb.Type{paramA, mapOfAB}, decls.Bool,
typeParamABList)),
// Conversions to type.
decls.NewFunction(overloads.TypeConvertType,
decls.NewParameterizedOverload(overloads.TypeConvertType,
[]*exprpb.Type{paramA}, decls.NewTypeType(paramA), typeParamAList)),
// Conversions to int.
decls.NewFunction(overloads.TypeConvertInt,
decls.NewOverload(overloads.IntToInt, []*exprpb.Type{decls.Int}, decls.Int),
decls.NewOverload(overloads.UintToInt, []*exprpb.Type{decls.Uint}, decls.Int),
decls.NewOverload(overloads.DoubleToInt, []*exprpb.Type{decls.Double}, decls.Int),
decls.NewOverload(overloads.StringToInt, []*exprpb.Type{decls.String}, decls.Int),
decls.NewOverload(overloads.TimestampToInt, []*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewOverload(overloads.DurationToInt, []*exprpb.Type{decls.Duration}, decls.Int)),
// Conversions to uint.
decls.NewFunction(overloads.TypeConvertUint,
decls.NewOverload(overloads.UintToUint, []*exprpb.Type{decls.Uint}, decls.Uint),
decls.NewOverload(overloads.IntToUint, []*exprpb.Type{decls.Int}, decls.Uint),
decls.NewOverload(overloads.DoubleToUint, []*exprpb.Type{decls.Double}, decls.Uint),
decls.NewOverload(overloads.StringToUint, []*exprpb.Type{decls.String}, decls.Uint)),
// Conversions to double.
decls.NewFunction(overloads.TypeConvertDouble,
decls.NewOverload(overloads.DoubleToDouble, []*exprpb.Type{decls.Double}, decls.Double),
decls.NewOverload(overloads.IntToDouble, []*exprpb.Type{decls.Int}, decls.Double),
decls.NewOverload(overloads.UintToDouble, []*exprpb.Type{decls.Uint}, decls.Double),
decls.NewOverload(overloads.StringToDouble, []*exprpb.Type{decls.String}, decls.Double)),
// Conversions to bool.
decls.NewFunction(overloads.TypeConvertBool,
decls.NewOverload(overloads.BoolToBool, []*exprpb.Type{decls.Bool}, decls.Bool),
decls.NewOverload(overloads.StringToBool, []*exprpb.Type{decls.String}, decls.Bool)),
// Conversions to string.
decls.NewFunction(overloads.TypeConvertString,
decls.NewOverload(overloads.StringToString, []*exprpb.Type{decls.String}, decls.String),
decls.NewOverload(overloads.BoolToString, []*exprpb.Type{decls.Bool}, decls.String),
decls.NewOverload(overloads.IntToString, []*exprpb.Type{decls.Int}, decls.String),
decls.NewOverload(overloads.UintToString, []*exprpb.Type{decls.Uint}, decls.String),
decls.NewOverload(overloads.DoubleToString, []*exprpb.Type{decls.Double}, decls.String),
decls.NewOverload(overloads.BytesToString, []*exprpb.Type{decls.Bytes}, decls.String),
decls.NewOverload(overloads.TimestampToString, []*exprpb.Type{decls.Timestamp}, decls.String),
decls.NewOverload(overloads.DurationToString, []*exprpb.Type{decls.Duration}, decls.String)),
// Conversions to bytes.
decls.NewFunction(overloads.TypeConvertBytes,
decls.NewOverload(overloads.BytesToBytes, []*exprpb.Type{decls.Bytes}, decls.Bytes),
decls.NewOverload(overloads.StringToBytes, []*exprpb.Type{decls.String}, decls.Bytes)),
// Conversions to timestamps.
decls.NewFunction(overloads.TypeConvertTimestamp,
decls.NewOverload(overloads.TimestampToTimestamp,
[]*exprpb.Type{decls.Timestamp}, decls.Timestamp),
decls.NewOverload(overloads.StringToTimestamp,
[]*exprpb.Type{decls.String}, decls.Timestamp),
decls.NewOverload(overloads.IntToTimestamp,
[]*exprpb.Type{decls.Int}, decls.Timestamp)),
// Conversions to durations.
decls.NewFunction(overloads.TypeConvertDuration,
decls.NewOverload(overloads.DurationToDuration,
[]*exprpb.Type{decls.Duration}, decls.Duration),
decls.NewOverload(overloads.StringToDuration,
[]*exprpb.Type{decls.String}, decls.Duration),
decls.NewOverload(overloads.IntToDuration,
[]*exprpb.Type{decls.Int}, decls.Duration)),
// Conversions to Dyn.
decls.NewFunction(overloads.TypeConvertDyn,
decls.NewParameterizedOverload(overloads.ToDyn,
[]*exprpb.Type{paramA}, decls.Dyn,
typeParamAList)),
// String functions.
decls.NewFunction(overloads.Contains,
decls.NewInstanceOverload(overloads.ContainsString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.EndsWith,
decls.NewInstanceOverload(overloads.EndsWithString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.Matches,
decls.NewOverload(overloads.Matches,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewInstanceOverload(overloads.MatchesString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.StartsWith,
decls.NewInstanceOverload(overloads.StartsWithString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
// Date/time functions.
decls.NewFunction(overloads.TimeGetFullYear,
decls.NewInstanceOverload(overloads.TimestampToYear,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToYearWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetMonth,
decls.NewInstanceOverload(overloads.TimestampToMonth,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToMonthWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetDayOfYear,
decls.NewInstanceOverload(overloads.TimestampToDayOfYear,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToDayOfYearWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetDayOfMonth,
decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBased,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetDate,
decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBased,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetDayOfWeek,
decls.NewInstanceOverload(overloads.TimestampToDayOfWeek,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToDayOfWeekWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
decls.NewFunction(overloads.TimeGetHours,
decls.NewInstanceOverload(overloads.TimestampToHours,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToHoursWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.DurationToHours,
[]*exprpb.Type{decls.Duration}, decls.Int)),
decls.NewFunction(overloads.TimeGetMinutes,
decls.NewInstanceOverload(overloads.TimestampToMinutes,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToMinutesWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.DurationToMinutes,
[]*exprpb.Type{decls.Duration}, decls.Int)),
decls.NewFunction(overloads.TimeGetSeconds,
decls.NewInstanceOverload(overloads.TimestampToSeconds,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToSecondsWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.DurationToSeconds,
[]*exprpb.Type{decls.Duration}, decls.Int)),
decls.NewFunction(overloads.TimeGetMilliseconds,
decls.NewInstanceOverload(overloads.TimestampToMilliseconds,
[]*exprpb.Type{decls.Timestamp}, decls.Int),
decls.NewInstanceOverload(overloads.TimestampToMillisecondsWithTz,
[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
decls.NewInstanceOverload(overloads.DurationToMilliseconds,
[]*exprpb.Type{decls.Duration}, decls.Int)),
// Relations.
decls.NewFunction(operators.Less,
decls.NewOverload(overloads.LessBool,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
decls.NewOverload(overloads.LessInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessInt64Double,
[]*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessInt64Uint64,
[]*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessUint64Double,
[]*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessUint64Int64,
[]*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessDoubleInt64,
[]*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessDoubleUint64,
[]*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewOverload(overloads.LessBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
decls.NewOverload(overloads.LessTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
decls.NewOverload(overloads.LessDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
decls.NewFunction(operators.LessEquals,
decls.NewOverload(overloads.LessEqualsBool,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
decls.NewOverload(overloads.LessEqualsInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessEqualsInt64Double,
[]*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessEqualsInt64Uint64,
[]*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessEqualsUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessEqualsUint64Double,
[]*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessEqualsUint64Int64,
[]*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessEqualsDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
decls.NewOverload(overloads.LessEqualsDoubleInt64,
[]*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
decls.NewOverload(overloads.LessEqualsDoubleUint64,
[]*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.LessEqualsString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewOverload(overloads.LessEqualsBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
decls.NewOverload(overloads.LessEqualsTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
decls.NewOverload(overloads.LessEqualsDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
decls.NewFunction(operators.Greater,
decls.NewOverload(overloads.GreaterBool,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
decls.NewOverload(overloads.GreaterInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterInt64Double,
[]*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterInt64Uint64,
[]*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterUint64Double,
[]*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterUint64Int64,
[]*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterDoubleInt64,
[]*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterDoubleUint64,
[]*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewOverload(overloads.GreaterBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
decls.NewOverload(overloads.GreaterTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
decls.NewOverload(overloads.GreaterDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
decls.NewFunction(operators.GreaterEquals,
decls.NewOverload(overloads.GreaterEqualsBool,
[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsInt64,
[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsInt64Double,
[]*exprpb.Type{decls.Int, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsInt64Uint64,
[]*exprpb.Type{decls.Int, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsUint64,
[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsUint64Double,
[]*exprpb.Type{decls.Uint, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsUint64Int64,
[]*exprpb.Type{decls.Uint, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsDouble,
[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsDoubleInt64,
[]*exprpb.Type{decls.Double, decls.Int}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsDoubleUint64,
[]*exprpb.Type{decls.Double, decls.Uint}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsBytes,
[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsTimestamp,
[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
decls.NewOverload(overloads.GreaterEqualsDuration,
[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
}...)
// StandardFunctions returns the Decls for all functions in the evaluator.
//
// Deprecated: prefer stdlib.FunctionExprDecls()
func StandardFunctions() []*exprpb.Decl {
return stdlib.FunctionExprDecls()
}
// StandardDeclarations returns the Decls for all functions and constants in the evaluator.
func StandardDeclarations() []*exprpb.Decl {
return standardDeclarations
// StandardTypes returns the set of type identifiers for standard library types.
//
// Deprecated: prefer stdlib.TypeExprDecls()
func StandardTypes() []*exprpb.Decl {
return stdlib.TypeExprDecls()
}

View File

@@ -15,154 +15,54 @@
package checker
import (
"fmt"
"strings"
"github.com/google/cel-go/checker/decls"
"google.golang.org/protobuf/proto"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
"github.com/google/cel-go/common/types"
)
const (
kindUnknown = iota + 1
kindError
kindFunction
kindDyn
kindPrimitive
kindWellKnown
kindWrapper
kindNull
kindAbstract
kindType
kindList
kindMap
kindObject
kindTypeParam
)
// FormatCheckedType converts a type message into a string representation.
func FormatCheckedType(t *exprpb.Type) string {
switch kindOf(t) {
case kindDyn:
return "dyn"
case kindFunction:
return formatFunction(t.GetFunction().GetResultType(),
t.GetFunction().GetArgTypes(),
false)
case kindList:
return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType()))
case kindObject:
return t.GetMessageType()
case kindMap:
return fmt.Sprintf("map(%s, %s)",
FormatCheckedType(t.GetMapType().GetKeyType()),
FormatCheckedType(t.GetMapType().GetValueType()))
case kindNull:
return "null"
case kindPrimitive:
switch t.GetPrimitive() {
case exprpb.Type_UINT64:
return "uint"
case exprpb.Type_INT64:
return "int"
}
return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
case kindType:
if t.GetType() == nil {
return "type"
}
return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
case kindWellKnown:
switch t.GetWellKnown() {
case exprpb.Type_ANY:
return "any"
case exprpb.Type_DURATION:
return "duration"
case exprpb.Type_TIMESTAMP:
return "timestamp"
}
case kindWrapper:
return fmt.Sprintf("wrapper(%s)",
FormatCheckedType(decls.NewPrimitiveType(t.GetWrapper())))
case kindError:
return "!error!"
case kindTypeParam:
return t.GetTypeParam()
case kindAbstract:
at := t.GetAbstractType()
params := at.GetParameterTypes()
paramStrs := make([]string, len(params))
for i, p := range params {
paramStrs[i] = FormatCheckedType(p)
}
return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
}
return t.String()
}
// isDyn returns true if the input t is either type DYN or a well-known ANY message.
func isDyn(t *exprpb.Type) bool {
func isDyn(t *types.Type) bool {
// Note: object type values that are well-known and map to a DYN value in practice
// are sanitized prior to being added to the environment.
switch kindOf(t) {
case kindDyn:
switch t.Kind() {
case types.DynKind, types.AnyKind:
return true
case kindWellKnown:
return t.GetWellKnown() == exprpb.Type_ANY
default:
return false
}
}
// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
func isDynOrError(t *exprpb.Type) bool {
func isDynOrError(t *types.Type) bool {
return isError(t) || isDyn(t)
}
func isError(t *exprpb.Type) bool {
return kindOf(t) == kindError
func isError(t *types.Type) bool {
return t.Kind() == types.ErrorKind
}
func isOptional(t *exprpb.Type) bool {
if kindOf(t) == kindAbstract {
at := t.GetAbstractType()
return at.GetName() == "optional"
func isOptional(t *types.Type) bool {
if t.Kind() == types.OpaqueKind {
return t.TypeName() == "optional"
}
return false
}
func maybeUnwrapOptional(t *exprpb.Type) (*exprpb.Type, bool) {
func maybeUnwrapOptional(t *types.Type) (*types.Type, bool) {
if isOptional(t) {
at := t.GetAbstractType()
return at.GetParameterTypes()[0], true
return t.Parameters()[0], true
}
return t, false
}
func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
switch e.GetExprKind().(type) {
case *exprpb.Expr_ConstExpr:
literal := e.GetConstExpr()
switch literal.GetConstantKind().(type) {
case *exprpb.Constant_StringValue:
return literal.GetStringValue(), true
}
}
return "", false
}
// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
// A type is less specific if it matches the other type using the DYN type.
func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool {
kind1, kind2 := kindOf(t1), kindOf(t2)
func isEqualOrLessSpecific(t1, t2 *types.Type) bool {
kind1, kind2 := t1.Kind(), t2.Kind()
// The first type is less specific.
if isDyn(t1) || kind1 == kindTypeParam {
if isDyn(t1) || kind1 == types.TypeParamKind {
return true
}
// The first type is not less specific.
if isDyn(t2) || kind2 == kindTypeParam {
if isDyn(t2) || kind2 == types.TypeParamKind {
return false
}
// Types must be of the same kind to be equal.
@@ -173,38 +73,34 @@ func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool {
// With limited exceptions for ANY and JSON values, the types must agree and be equivalent in
// order to return true.
switch kind1 {
case kindAbstract:
a1 := t1.GetAbstractType()
a2 := t2.GetAbstractType()
if a1.GetName() != a2.GetName() ||
len(a1.GetParameterTypes()) != len(a2.GetParameterTypes()) {
case types.OpaqueKind:
if t1.TypeName() != t2.TypeName() ||
len(t1.Parameters()) != len(t2.Parameters()) {
return false
}
for i, p1 := range a1.GetParameterTypes() {
if !isEqualOrLessSpecific(p1, a2.GetParameterTypes()[i]) {
for i, p1 := range t1.Parameters() {
if !isEqualOrLessSpecific(p1, t2.Parameters()[i]) {
return false
}
}
return true
case kindList:
return isEqualOrLessSpecific(t1.GetListType().GetElemType(), t2.GetListType().GetElemType())
case kindMap:
m1 := t1.GetMapType()
m2 := t2.GetMapType()
return isEqualOrLessSpecific(m1.GetKeyType(), m2.GetKeyType()) &&
isEqualOrLessSpecific(m1.GetValueType(), m2.GetValueType())
case kindType:
case types.ListKind:
return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0])
case types.MapKind:
return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) &&
isEqualOrLessSpecific(t1.Parameters()[1], t2.Parameters()[1])
case types.TypeKind:
return true
default:
return proto.Equal(t1, t2)
return t1.IsExactType(t2)
}
}
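The refactor replaces proto kind probing with the CEL-native *types.Type API; a small sketch, using only common/types constructors already referenced in this diff, of the shapes isEqualOrLessSpecific compares (a dyn element makes a list the more general of the two):

package main

import (
    "fmt"

    "github.com/google/cel-go/common/types"
)

func main() {
    general := types.NewListType(types.DynType)  // list(dyn)
    specific := types.NewListType(types.IntType) // list(int)

    // Kind() drives the checker's switch; Parameters() exposes the element type.
    fmt.Println(general.Kind() == specific.Kind())               // true: both ListKind
    fmt.Println(general.Parameters()[0].Kind() == types.DynKind) // true: the dyn element is what makes it less specific
    fmt.Println(general.IsExactType(specific))                   // false: dyn and int are distinct element types
}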
// internalIsAssignable returns true if t1 is assignable to t2.
func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool {
// Process type parameters.
kind1, kind2 := kindOf(t1), kindOf(t2)
if kind2 == kindTypeParam {
kind1, kind2 := t1.Kind(), t2.Kind()
if kind2 == types.TypeParamKind {
// If t2 is a valid type substitution for t1, return true.
valid, t2HasSub := isValidTypeSubstitution(m, t1, t2)
if valid {
@@ -217,7 +113,7 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
}
// Otherwise, fall through to check whether t1 is a possible substitution for t2.
}
if kind1 == kindTypeParam {
if kind1 == types.TypeParamKind {
// Return whether t1 is a valid substitution for t2. If not, do no additional checks as the
// possible type substitutions have been searched in both directions.
valid, _ := isValidTypeSubstitution(m, t2, t1)
@@ -228,40 +124,25 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
if isDynOrError(t1) || isDynOrError(t2) {
return true
}
// Preserve the nullness checks of the legacy type-checker.
if kind1 == types.NullTypeKind {
return internalIsAssignableNull(t2)
}
if kind2 == types.NullTypeKind {
return internalIsAssignableNull(t1)
}
// Test for when the types do not need to agree, but are more specific than dyn.
switch kind1 {
case kindNull:
return internalIsAssignableNull(t2)
case kindPrimitive:
return internalIsAssignablePrimitive(t1.GetPrimitive(), t2)
case kindWrapper:
return internalIsAssignable(m, decls.NewPrimitiveType(t1.GetWrapper()), t2)
default:
if kind1 != kind2 {
return false
}
}
// Test for when the types must agree.
switch kind1 {
// ERROR, TYPE_PARAM, and DYN handled above.
case kindAbstract:
return internalIsAssignableAbstractType(m, t1.GetAbstractType(), t2.GetAbstractType())
case kindFunction:
return internalIsAssignableFunction(m, t1.GetFunction(), t2.GetFunction())
case kindList:
return internalIsAssignable(m, t1.GetListType().GetElemType(), t2.GetListType().GetElemType())
case kindMap:
return internalIsAssignableMap(m, t1.GetMapType(), t2.GetMapType())
case kindObject:
return t1.GetMessageType() == t2.GetMessageType()
case kindType:
// A type is a type is a type, any additional parameterization of the
// type cannot affect method resolution or assignability.
return true
case kindWellKnown:
return t1.GetWellKnown() == t2.GetWellKnown()
case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind,
types.AnyKind, types.DurationKind, types.TimestampKind,
types.StructKind:
return t1.IsAssignableType(t2)
case types.TypeKind:
return kind2 == types.TypeKind
case types.OpaqueKind, types.ListKind, types.MapKind:
return t1.Kind() == t2.Kind() && t1.TypeName() == t2.TypeName() &&
internalIsAssignableList(m, t1.Parameters(), t2.Parameters())
default:
return false
}
@@ -274,16 +155,16 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
// - t2 has a type substitution (t2sub) equal to t1
// - t2 has a type substitution (t2sub) assignable to t1
// - t2 does not occur within t1.
func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub bool) {
func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool) {
// Early return if the t1 and t2 are the same instance.
kind1, kind2 := kindOf(t1), kindOf(t2)
if kind1 == kind2 && (t1 == t2 || proto.Equal(t1, t2)) {
kind1, kind2 := t1.Kind(), t2.Kind()
if kind1 == kind2 && t1.IsExactType(t2) {
return true, true
}
if t2Sub, found := m.find(t2); found {
// Early return if t1 and t2Sub are the same instance as otherwise the mapping
// might mark a type as being a substitution for itself.
if kind1 == kindOf(t2Sub) && (t1 == t2Sub || proto.Equal(t1, t2Sub)) {
if kind1 == t2Sub.Kind() && t1.IsExactType(t2Sub) {
return true, true
}
// If the types are compatible, pick the more general type and return true
@@ -305,28 +186,10 @@ func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub boo
return false, false
}
// internalIsAssignableAbstractType returns true if the abstract type names agree and all type
// parameters are assignable.
func internalIsAssignableAbstractType(m *mapping, a1 *exprpb.Type_AbstractType, a2 *exprpb.Type_AbstractType) bool {
return a1.GetName() == a2.GetName() &&
internalIsAssignableList(m, a1.GetParameterTypes(), a2.GetParameterTypes())
}
// internalIsAssignableFunction returns true if the function return type and arg types are
// assignable.
func internalIsAssignableFunction(m *mapping, f1 *exprpb.Type_FunctionType, f2 *exprpb.Type_FunctionType) bool {
f1ArgTypes := flattenFunctionTypes(f1)
f2ArgTypes := flattenFunctionTypes(f2)
if internalIsAssignableList(m, f1ArgTypes, f2ArgTypes) {
return true
}
return false
}
// internalIsAssignableList returns true if the element types at each index in the list are
// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be
// assignable.
func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool {
if len(l1) != len(l2) {
return false
}
@@ -338,41 +201,22 @@ func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type)
return true
}
// internalIsAssignableMap returns true if map m1 may be assigned to map m2.
func internalIsAssignableMap(m *mapping, m1 *exprpb.Type_MapType, m2 *exprpb.Type_MapType) bool {
if internalIsAssignableList(m,
[]*exprpb.Type{m1.GetKeyType(), m1.GetValueType()},
[]*exprpb.Type{m2.GetKeyType(), m2.GetValueType()}) {
// internalIsAssignableNull returns true if the type is nullable.
func internalIsAssignableNull(t *types.Type) bool {
return isLegacyNullable(t) || t.IsAssignableType(types.NullType)
}
// isLegacyNullable preserves the null-ness compatibility of the original type-checker implementation.
func isLegacyNullable(t *types.Type) bool {
switch t.Kind() {
case types.OpaqueKind, types.StructKind, types.AnyKind, types.DurationKind, types.TimestampKind:
return true
}
return false
}
// internalIsAssignableNull returns true if the type is nullable.
func internalIsAssignableNull(t *exprpb.Type) bool {
switch kindOf(t) {
case kindAbstract, kindObject, kindNull, kindWellKnown, kindWrapper:
return true
default:
return false
}
}
// internalIsAssignablePrimitive returns true if the target type is the same or if it is a wrapper
// for the primitive type.
func internalIsAssignablePrimitive(p exprpb.Type_PrimitiveType, target *exprpb.Type) bool {
switch kindOf(target) {
case kindPrimitive:
return p == target.GetPrimitive()
case kindWrapper:
return p == target.GetWrapper()
default:
return false
}
}
// isAssignable returns an updated type substitution mapping if t1 is assignable to t2.
func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping {
func isAssignable(m *mapping, t1, t2 *types.Type) *mapping {
mCopy := m.copy()
if internalIsAssignable(mCopy, t1, t2) {
return mCopy
@@ -381,7 +225,7 @@ func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping {
}
// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2.
func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping {
func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping {
mCopy := m.copy()
if internalIsAssignableList(mCopy, l1, l2) {
return mCopy
@@ -389,44 +233,8 @@ func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping
return nil
}
// kindOf returns the kind of the type as defined in the checked.proto.
func kindOf(t *exprpb.Type) int {
if t == nil || t.TypeKind == nil {
return kindUnknown
}
switch t.GetTypeKind().(type) {
case *exprpb.Type_Error:
return kindError
case *exprpb.Type_Function:
return kindFunction
case *exprpb.Type_Dyn:
return kindDyn
case *exprpb.Type_Primitive:
return kindPrimitive
case *exprpb.Type_WellKnown:
return kindWellKnown
case *exprpb.Type_Wrapper:
return kindWrapper
case *exprpb.Type_Null:
return kindNull
case *exprpb.Type_Type:
return kindType
case *exprpb.Type_ListType_:
return kindList
case *exprpb.Type_MapType_:
return kindMap
case *exprpb.Type_MessageType:
return kindObject
case *exprpb.Type_TypeParam:
return kindTypeParam
case *exprpb.Type_AbstractType_:
return kindAbstract
}
return kindUnknown
}
// mostGeneral returns the more general of two types which are known to unify.
func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type {
func mostGeneral(t1, t2 *types.Type) *types.Type {
if isEqualOrLessSpecific(t1, t2) {
return t1
}
@@ -436,32 +244,25 @@ func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type {
// notReferencedIn checks whether the type doesn't appear directly or transitively within the other
// type. This is a standard requirement for type unification, commonly referred to as the "occurs
// check".
func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool {
if proto.Equal(t, withinType) {
func notReferencedIn(m *mapping, t, withinType *types.Type) bool {
if t.IsExactType(withinType) {
return false
}
withinKind := kindOf(withinType)
withinKind := withinType.Kind()
switch withinKind {
case kindTypeParam:
case types.TypeParamKind:
wtSub, found := m.find(withinType)
if !found {
return true
}
return notReferencedIn(m, t, wtSub)
case kindAbstract:
for _, pt := range withinType.GetAbstractType().GetParameterTypes() {
case types.OpaqueKind, types.ListKind, types.MapKind:
for _, pt := range withinType.Parameters() {
if !notReferencedIn(m, t, pt) {
return false
}
}
return true
case kindList:
return notReferencedIn(m, t, withinType.GetListType().GetElemType())
case kindMap:
mt := withinType.GetMapType()
return notReferencedIn(m, t, mt.GetKeyType()) && notReferencedIn(m, t, mt.GetValueType())
case kindWrapper:
return notReferencedIn(m, t, decls.NewPrimitiveType(withinType.GetWrapper()))
default:
return true
}
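The occurs check rejects unifying a type parameter T with a type that contains T (for example list(T)), which would otherwise produce an infinite type. A brief sketch of that shape, assuming the types.NewTypeParamType constructor from common/types:

package main

import (
    "fmt"

    "github.com/google/cel-go/common/types"
)

func main() {
    t := types.NewTypeParamType("T") // candidate substitution target
    within := types.NewListType(t)   // list(T) references T

    // notReferencedIn walks Parameters() of list, map, and opaque types looking
    // for t; here the element type is T itself, so a binding T := list(T) is rejected.
    fmt.Println(within.Parameters()[0].IsExactType(t)) // true: T occurs within list(T)
}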
@@ -469,39 +270,25 @@ func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool {
// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type
// parameters are replaced by DYN if typeParamToDyn is true.
func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type {
func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type {
if tSub, found := m.find(t); found {
return substitute(m, tSub, typeParamToDyn)
}
kind := kindOf(t)
if typeParamToDyn && kind == kindTypeParam {
return decls.Dyn
kind := t.Kind()
if typeParamToDyn && kind == types.TypeParamKind {
return types.DynType
}
switch kind {
case kindAbstract:
at := t.GetAbstractType()
params := make([]*exprpb.Type, len(at.GetParameterTypes()))
for i, p := range at.GetParameterTypes() {
params[i] = substitute(m, p, typeParamToDyn)
}
return decls.NewAbstractType(at.GetName(), params...)
case kindFunction:
fn := t.GetFunction()
rt := substitute(m, fn.ResultType, typeParamToDyn)
args := make([]*exprpb.Type, len(fn.GetArgTypes()))
for i, a := range fn.ArgTypes {
args[i] = substitute(m, a, typeParamToDyn)
}
return decls.NewFunctionType(rt, args...)
case kindList:
return decls.NewListType(substitute(m, t.GetListType().GetElemType(), typeParamToDyn))
case kindMap:
mt := t.GetMapType()
return decls.NewMapType(substitute(m, mt.GetKeyType(), typeParamToDyn),
substitute(m, mt.GetValueType(), typeParamToDyn))
case kindType:
if t.GetType() != nil {
return decls.NewTypeType(substitute(m, t.GetType(), typeParamToDyn))
case types.OpaqueKind:
return types.NewOpaqueType(t.TypeName(), substituteParams(m, t.Parameters(), typeParamToDyn)...)
case types.ListKind:
return types.NewListType(substitute(m, t.Parameters()[0], typeParamToDyn))
case types.MapKind:
return types.NewMapType(substitute(m, t.Parameters()[0], typeParamToDyn),
substitute(m, t.Parameters()[1], typeParamToDyn))
case types.TypeKind:
if len(t.Parameters()) > 0 {
return types.NewTypeTypeWithParam(substitute(m, t.Parameters()[0], typeParamToDyn))
}
return t
default:
@@ -509,21 +296,14 @@ func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type {
}
}
func typeKey(t *exprpb.Type) string {
return FormatCheckedType(t)
func substituteParams(m *mapping, typeParams []*types.Type, typeParamToDyn bool) []*types.Type {
subParams := make([]*types.Type, len(typeParams))
for i, tp := range typeParams {
subParams[i] = substitute(m, tp, typeParamToDyn)
}
return subParams
}
// flattenFunctionTypes takes a function with arg types T1, T2, ..., TN and result type TR
// and returns a slice containing {T1, T2, ..., TN, TR}.
func flattenFunctionTypes(f *exprpb.Type_FunctionType) []*exprpb.Type {
argTypes := f.GetArgTypes()
if len(argTypes) == 0 {
return []*exprpb.Type{f.GetResultType()}
}
flattend := make([]*exprpb.Type, len(argTypes)+1, len(argTypes)+1)
for i, at := range argTypes {
flattend[i] = at
}
flattend[len(argTypes)] = f.GetResultType()
return flattend
func newFunctionType(resultType *types.Type, argTypes ...*types.Type) *types.Type {
return types.NewOpaqueType("function", append([]*types.Type{resultType}, argTypes...)...)
}
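With the protobuf function type removed, a function signature is modeled as an opaque type named "function" whose first parameter is the result type and whose remaining parameters are the argument types; a tiny sketch of the shape newFunctionType produces:

package main

import (
    "fmt"

    "github.com/google/cel-go/common/types"
)

func main() {
    // Equivalent of newFunctionType(types.StringType, types.IntType): a string(int) signature.
    fn := types.NewOpaqueType("function", types.StringType, types.IntType)
    fmt.Println(fn.TypeName())                 // function
    fmt.Println(fn.Parameters()[0].TypeName()) // string (result type comes first)
    fmt.Println(fn.Parameters()[1].TypeName()) // int (argument types follow)
}

Flattening the result type into the parameter list lets internalIsAssignableList handle function signatures through the ordinary OpaqueKind path, with no dedicated code.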

52
vendor/github.com/google/cel-go/common/ast/BUILD.bazel generated vendored Normal file
View File

@@ -0,0 +1,52 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(
default_visibility = [
"//cel:__subpackages__",
"//checker:__subpackages__",
"//common:__subpackages__",
"//interpreter:__subpackages__",
],
licenses = ["notice"], # Apache 2.0
)
go_library(
name = "go_default_library",
srcs = [
"ast.go",
"expr.go",
],
importpath = "github.com/google/cel-go/common/ast",
deps = [
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"ast_test.go",
"expr_test.go",
],
embed = [
":go_default_library",
],
deps = [
"//checker:go_default_library",
"//checker/decls:go_default_library",
"//common:go_default_library",
"//common/containers:go_default_library",
"//common/decls:go_default_library",
"//common/overloads:go_default_library",
"//common/stdlib:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//parser:go_default_library",
"//test/proto3pb:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

226
vendor/github.com/google/cel-go/common/ast/ast.go generated vendored Normal file
View File

@@ -0,0 +1,226 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ast declares data structures useful for parsed and checked abstract syntax trees
package ast
import (
"fmt"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
structpb "google.golang.org/protobuf/types/known/structpb"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// CheckedAST contains a protobuf expression and source info along with CEL-native type and reference information.
type CheckedAST struct {
Expr *exprpb.Expr
SourceInfo *exprpb.SourceInfo
TypeMap map[int64]*types.Type
ReferenceMap map[int64]*ReferenceInfo
}
// CheckedASTToCheckedExpr converts a CheckedAST to a CheckedExpr protobuf.
func CheckedASTToCheckedExpr(ast *CheckedAST) (*exprpb.CheckedExpr, error) {
refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap))
for id, ref := range ast.ReferenceMap {
r, err := ReferenceInfoToReferenceExpr(ref)
if err != nil {
return nil, err
}
refMap[id] = r
}
typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap))
for id, typ := range ast.TypeMap {
t, err := types.TypeToExprType(typ)
if err != nil {
return nil, err
}
typeMap[id] = t
}
return &exprpb.CheckedExpr{
Expr: ast.Expr,
SourceInfo: ast.SourceInfo,
ReferenceMap: refMap,
TypeMap: typeMap,
}, nil
}
// CheckedExprToCheckedAST converts a CheckedExpr protobuf to a CheckedAST instance.
func CheckedExprToCheckedAST(checked *exprpb.CheckedExpr) (*CheckedAST, error) {
refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
for id, ref := range checked.GetReferenceMap() {
r, err := ReferenceExprToReferenceInfo(ref)
if err != nil {
return nil, err
}
refMap[id] = r
}
typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
for id, typ := range checked.GetTypeMap() {
t, err := types.ExprTypeToType(typ)
if err != nil {
return nil, err
}
typeMap[id] = t
}
return &CheckedAST{
Expr: checked.GetExpr(),
SourceInfo: checked.GetSourceInfo(),
ReferenceMap: refMap,
TypeMap: typeMap,
}, nil
}
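A rough round-trip sketch for the two converters above, built from a hand-written single-literal CheckedExpr using the expr/v1alpha1 field names already imported in this file:

package main

import (
    "fmt"

    celast "github.com/google/cel-go/common/ast"
    exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
    // CheckedExpr for the literal expression `42`, typed as int.
    checked := &exprpb.CheckedExpr{
        Expr: &exprpb.Expr{
            Id: 1,
            ExprKind: &exprpb.Expr_ConstExpr{
                ConstExpr: &exprpb.Constant{
                    ConstantKind: &exprpb.Constant_Int64Value{Int64Value: 42},
                },
            },
        },
        TypeMap: map[int64]*exprpb.Type{
            1: {TypeKind: &exprpb.Type_Primitive{Primitive: exprpb.Type_INT64}},
        },
    }

    checkedAST, err := celast.CheckedExprToCheckedAST(checked)
    if err != nil {
        panic(err)
    }
    fmt.Println(checkedAST.TypeMap[1].TypeName()) // int (CEL-native type)

    roundTrip, err := celast.CheckedASTToCheckedExpr(checkedAST)
    if err != nil {
        panic(err)
    }
    fmt.Println(roundTrip.GetTypeMap()[1].GetPrimitive()) // INT64 (back to the proto form)
}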
// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to
// either a qualified identifier name, a set of overload ids, or a constant value from an enum.
type ReferenceInfo struct {
Name string
OverloadIDs []string
Value ref.Val
}
// NewIdentReference creates a ReferenceInfo instance for an identifier with an optional constant value.
func NewIdentReference(name string, value ref.Val) *ReferenceInfo {
return &ReferenceInfo{Name: name, Value: value}
}
// NewFunctionReference creates a ReferenceInfo instance for a set of function overloads.
func NewFunctionReference(overloads ...string) *ReferenceInfo {
info := &ReferenceInfo{}
for _, id := range overloads {
info.AddOverload(id)
}
return info
}
// AddOverload appends a function overload ID to the ReferenceInfo.
func (r *ReferenceInfo) AddOverload(overloadID string) {
for _, id := range r.OverloadIDs {
if id == overloadID {
return
}
}
r.OverloadIDs = append(r.OverloadIDs, overloadID)
}
// Equals returns whether two references are identical to each other.
func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool {
if r.Name != other.Name {
return false
}
if len(r.OverloadIDs) != len(other.OverloadIDs) {
return false
}
if len(r.OverloadIDs) != 0 {
overloadMap := make(map[string]struct{}, len(r.OverloadIDs))
for _, id := range r.OverloadIDs {
overloadMap[id] = struct{}{}
}
for _, id := range other.OverloadIDs {
_, found := overloadMap[id]
if !found {
return false
}
}
}
if r.Value == nil && other.Value == nil {
return true
}
if r.Value == nil && other.Value != nil ||
r.Value != nil && other.Value == nil ||
r.Value.Equal(other.Value) != types.True {
return false
}
return true
}
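A brief sketch of the reference helpers above: overload IDs are de-duplicated on insert, and Equals compares the overload sets without regard to order (the overload IDs here are illustrative):

package main

import (
    "fmt"

    celast "github.com/google/cel-go/common/ast"
)

func main() {
    a := celast.NewFunctionReference("add_int64", "add_double")
    a.AddOverload("add_int64") // duplicate, silently ignored

    b := celast.NewFunctionReference("add_double", "add_int64")

    fmt.Println(len(a.OverloadIDs)) // 2
    fmt.Println(a.Equals(b))        // true: same name and same overload set, order ignored
}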
// ReferenceInfoToReferenceExpr converts a ReferenceInfo instance to a protobuf Reference suitable for serialization.
func ReferenceInfoToReferenceExpr(info *ReferenceInfo) (*exprpb.Reference, error) {
c, err := ValToConstant(info.Value)
if err != nil {
return nil, err
}
return &exprpb.Reference{
Name: info.Name,
OverloadId: info.OverloadIDs,
Value: c,
}, nil
}
// ReferenceExprToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance.
func ReferenceExprToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) {
v, err := ConstantToVal(ref.GetValue())
if err != nil {
return nil, err
}
return &ReferenceInfo{
Name: ref.GetName(),
OverloadIDs: ref.GetOverloadId(),
Value: v,
}, nil
}
// ValToConstant converts a CEL-native ref.Val to a protobuf Constant.
//
// Only simple scalar types are supported by this method.
func ValToConstant(v ref.Val) (*exprpb.Constant, error) {
if v == nil {
return nil, nil
}
switch v.Type() {
case types.BoolType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil
case types.BytesType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil
case types.DoubleType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil
case types.IntType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil
case types.NullType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil
case types.StringType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil
case types.UintType:
return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil
}
return nil, fmt.Errorf("unsupported constant kind: %v", v.Type())
}
// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val.
func ConstantToVal(c *exprpb.Constant) (ref.Val, error) {
if c == nil {
return nil, nil
}
switch c.GetConstantKind().(type) {
case *exprpb.Constant_BoolValue:
return types.Bool(c.GetBoolValue()), nil
case *exprpb.Constant_BytesValue:
return types.Bytes(c.GetBytesValue()), nil
case *exprpb.Constant_DoubleValue:
return types.Double(c.GetDoubleValue()), nil
case *exprpb.Constant_Int64Value:
return types.Int(c.GetInt64Value()), nil
case *exprpb.Constant_NullValue:
return types.NullValue, nil
case *exprpb.Constant_StringValue:
return types.String(c.GetStringValue()), nil
case *exprpb.Constant_Uint64Value:
return types.Uint(c.GetUint64Value()), nil
}
return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind())
}
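A small round-trip sketch for the scalar converters above; only the simple scalar kinds listed in the switches are supported:

package main

import (
    "fmt"

    celast "github.com/google/cel-go/common/ast"
    "github.com/google/cel-go/common/types"
)

func main() {
    c, err := celast.ValToConstant(types.String("hello"))
    if err != nil {
        panic(err)
    }
    fmt.Println(c.GetStringValue()) // hello

    v, err := celast.ConstantToVal(c)
    if err != nil {
        panic(err)
    }
    fmt.Println(v.Equal(types.String("hello"))) // true
}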

709
vendor/github.com/google/cel-go/common/ast/expr.go generated vendored Normal file
View File

@@ -0,0 +1,709 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// ExprKind represents the expression node kind.
type ExprKind int
const (
// UnspecifiedKind represents an unset expression with no specified properties.
UnspecifiedKind ExprKind = iota
// LiteralKind represents a primitive scalar literal.
LiteralKind
// IdentKind represents a simple variable, constant, or type identifier.
IdentKind
// SelectKind represents a field selection expression.
SelectKind
// CallKind represents a function call.
CallKind
// ListKind represents a list literal expression.
ListKind
// MapKind represents a map literal expression.
MapKind
// StructKind represents a struct literal expression.
StructKind
// ComprehensionKind represents a comprehension expression generated by a macro.
ComprehensionKind
)
// NavigateCheckedAST converts a CheckedAST to a NavigableExpr
func NavigateCheckedAST(ast *CheckedAST) NavigableExpr {
return newNavigableExpr(nil, ast.Expr, ast.TypeMap)
}
// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match.
//
// This function type should be used with the `Match` and `MatchList` calls.
type ExprMatcher func(NavigableExpr) bool
// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
// is composed entirely of constant values, such as a simple literal or a list or map literal of constants.
func ConstantValueMatcher() ExprMatcher {
return matchIsConstantValue
}
// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
// the specified `kind`.
func KindMatcher(kind ExprKind) ExprMatcher {
return func(e NavigableExpr) bool {
return e.Kind() == kind
}
}
// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
// function name is equal to `funcName`.
func FunctionMatcher(funcName string) ExprMatcher {
return func(e NavigableExpr) bool {
if e.Kind() != CallKind {
return false
}
return e.AsCall().FunctionName() == funcName
}
}
// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list.
//
// Such a result would work well with subsequent MatchList calls.
func AllMatcher() ExprMatcher {
return func(NavigableExpr) bool {
return true
}
}
// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values of the
// descendants which match.
func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
return matchListInternal([]NavigableExpr{expr}, matcher, true)
}
// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a
// subset of NavigableExpr values which match.
func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
visit := make([]NavigableExpr, len(exprs))
copy(visit, exprs)
return matchListInternal(visit, matcher, false)
}
func matchListInternal(visit []NavigableExpr, matcher ExprMatcher, visitDescendants bool) []NavigableExpr {
var matched []NavigableExpr
for len(visit) != 0 {
e := visit[0]
if matcher(e) {
matched = append(matched, e)
}
if visitDescendants {
visit = append(visit[1:], e.Children()...)
} else {
visit = visit[1:]
}
}
return matched
}
func matchIsConstantValue(e NavigableExpr) bool {
if e.Kind() == LiteralKind {
return true
}
if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind {
for _, child := range e.Children() {
if !matchIsConstantValue(child) {
return false
}
}
return true
}
return false
}
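Putting the navigation and matcher helpers together: a hedged sketch that walks a hand-built size("hi") call, finds it by function name, and reads its literal argument (no TypeMap is supplied, so node types default to dyn):

package main

import (
    "fmt"

    celast "github.com/google/cel-go/common/ast"
    exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
    // AST for the call size("hi"), with no checked type information attached.
    checked := &celast.CheckedAST{
        Expr: &exprpb.Expr{
            Id: 1,
            ExprKind: &exprpb.Expr_CallExpr{
                CallExpr: &exprpb.Expr_Call{
                    Function: "size",
                    Args: []*exprpb.Expr{{
                        Id: 2,
                        ExprKind: &exprpb.Expr_ConstExpr{
                            ConstExpr: &exprpb.Constant{
                                ConstantKind: &exprpb.Constant_StringValue{StringValue: "hi"},
                            },
                        },
                    }},
                },
            },
        },
    }

    root := celast.NavigateCheckedAST(checked)

    // Collect every call to "size" anywhere in the tree, then inspect it.
    for _, call := range celast.MatchDescendants(root, celast.FunctionMatcher("size")) {
        fmt.Println(call.AsCall().FunctionName())        // size
        fmt.Println(call.AsCall().Args()[0].AsLiteral()) // hi
        fmt.Println(call.Type().TypeName())              // dyn: no TypeMap entry for this node
    }

    // Literal descendants only.
    lits := celast.MatchDescendants(root, celast.KindMatcher(celast.LiteralKind))
    fmt.Println(len(lits)) // 1
}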
// NavigableExpr represents the base navigable expression value.
//
// Depending on the `Kind()` value, the NavigableExpr may be converted to a concrete expression type
// as indicated by the `As<Kind>` methods.
//
// NavigableExpr values and their concrete expression types should be nil-safe. Conversion of an expr
// to the wrong kind should produce a nil value.
type NavigableExpr interface {
// ID of the expression as it appears in the AST
ID() int64
// Kind of the expression node. See ExprKind for the valid enum values.
Kind() ExprKind
// Type of the expression node.
Type() *types.Type
// Parent returns the parent expression node, if one exists.
Parent() (NavigableExpr, bool)
// Children returns a list of child expression nodes.
Children() []NavigableExpr
// ToExpr adapts this NavigableExpr to a protobuf representation.
ToExpr() *exprpb.Expr
// AsCall adapts the expr into a NavigableCallExpr
//
// The Kind() must be equal to a CallKind for the conversion to be well-defined.
AsCall() NavigableCallExpr
// AsComprehension adapts the expr into a NavigableComprehensionExpr.
//
// The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined.
AsComprehension() NavigableComprehensionExpr
// AsIdent adapts the expr into an identifier string.
//
// The Kind() must be equal to an IdentKind for the conversion to be well-defined.
AsIdent() string
// AsLiteral adapts the expr into a constant ref.Val.
//
// The Kind() must be equal to a LiteralKind for the conversion to be well-defined.
AsLiteral() ref.Val
// AsList adapts the expr into a NavigableListExpr.
//
// The Kind() must be equal to a ListKind for the conversion to be well-defined.
AsList() NavigableListExpr
// AsMap adapts the expr into a NavigableMapExpr.
//
// The Kind() must be equal to a MapKind for the conversion to be well-defined.
AsMap() NavigableMapExpr
// AsSelect adapts the expr into a NavigableSelectExpr.
//
// The Kind() must be equal to a SelectKind for the conversion to be well-defined.
AsSelect() NavigableSelectExpr
// AsStruct adapts the expr into a NavigableStructExpr.
//
// The Kind() must be equal to a StructKind for the conversion to be well-defined.
AsStruct() NavigableStructExpr
// marker interface method
isNavigable()
}
// NavigableCallExpr defines an interface for inspecting a function call and its arguments.
type NavigableCallExpr interface {
// FunctionName returns the name of the function.
FunctionName() string
// Target returns the target of the expression if one is present.
Target() NavigableExpr
// Args returns the list of call arguments, excluding the target.
Args() []NavigableExpr
// ReturnType returns the result type of the call.
ReturnType() *types.Type
// marker interface method
isNavigable()
}
// NavigableListExpr defines an interface for inspecting a list literal expression.
type NavigableListExpr interface {
// Elements returns the list elements as navigable expressions.
Elements() []NavigableExpr
// OptionalIndices returns the list of optional indices in the list literal.
OptionalIndices() []int32
// Size returns the number of elements in the list.
Size() int
// marker interface method
isNavigable()
}
// NavigableSelectExpr defines an interface for inspecting a select expression.
type NavigableSelectExpr interface {
// Operand returns the selection operand expression.
Operand() NavigableExpr
// FieldName returns the field name being selected from the operand.
FieldName() string
// IsTestOnly indicates whether the select expression is a presence test generated by a macro.
IsTestOnly() bool
// marker interface method
isNavigable()
}
// NavigableMapExpr defines an interface for inspecting a map expression.
type NavigableMapExpr interface {
// Entries returns the map key value pairs as NavigableEntry values.
Entries() []NavigableEntry
// Size returns the number of entries in the map.
Size() int
// marker interface method
isNavigable()
}
// NavigableEntry defines an interface for inspecting a map entry.
type NavigableEntry interface {
// Key returns the map entry key expression.
Key() NavigableExpr
// Value returns the map entry value expression.
Value() NavigableExpr
// IsOptional returns whether the entry is optional.
IsOptional() bool
// marker interface method
isNavigable()
}
// NavigableStructExpr defines an interface for inspecting a struct and its field initializers.
type NavigableStructExpr interface {
// TypeName returns the struct type name.
TypeName() string
// Fields returns the set of field initializers in the struct expression as NavigableField values.
Fields() []NavigableField
// marker interface method
isNavigable()
}
// NavigableField defines an interface for inspecting a struct field initialization.
type NavigableField interface {
// FieldName returns the name of the field.
FieldName() string
// Value returns the field initialization expression.
Value() NavigableExpr
// IsOptional returns whether the field is optional.
IsOptional() bool
// marker interface method
isNavigable()
}
// NavigableComprehensionExpr defines an interface for inspecting a comprehension expression.
type NavigableComprehensionExpr interface {
// IterRange returns the iteration range expression.
IterRange() NavigableExpr
// IterVar returns the iteration variable name.
IterVar() string
// AccuVar returns the accumulation variable name.
AccuVar() string
// AccuInit returns the accumulation variable initialization expression.
AccuInit() NavigableExpr
// LoopCondition returns the loop condition expression.
LoopCondition() NavigableExpr
// LoopStep returns the loop step expression.
LoopStep() NavigableExpr
// Result returns the comprehension result expression.
Result() NavigableExpr
// marker interface method
isNavigable()
}
func newNavigableExpr(parent NavigableExpr, expr *exprpb.Expr, typeMap map[int64]*types.Type) NavigableExpr {
kind, factory := kindOf(expr)
nav := &navigableExprImpl{
parent: parent,
kind: kind,
expr: expr,
typeMap: typeMap,
createChildren: factory,
}
return nav
}
type navigableExprImpl struct {
parent NavigableExpr
kind ExprKind
expr *exprpb.Expr
typeMap map[int64]*types.Type
createChildren childFactory
}
func (nav *navigableExprImpl) ID() int64 {
return nav.ToExpr().GetId()
}
func (nav *navigableExprImpl) Kind() ExprKind {
return nav.kind
}
func (nav *navigableExprImpl) Type() *types.Type {
if t, found := nav.typeMap[nav.ID()]; found {
return t
}
return types.DynType
}
func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) {
if nav.parent != nil {
return nav.parent, true
}
return nil, false
}
func (nav *navigableExprImpl) Children() []NavigableExpr {
return nav.createChildren(nav)
}
func (nav *navigableExprImpl) ToExpr() *exprpb.Expr {
return nav.expr
}
func (nav *navigableExprImpl) AsCall() NavigableCallExpr {
return navigableCallImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsComprehension() NavigableComprehensionExpr {
return navigableComprehensionImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsIdent() string {
return nav.ToExpr().GetIdentExpr().GetName()
}
func (nav *navigableExprImpl) AsLiteral() ref.Val {
if nav.Kind() != LiteralKind {
return nil
}
val, err := ConstantToVal(nav.ToExpr().GetConstExpr())
if err != nil {
panic(err)
}
return val
}
func (nav *navigableExprImpl) AsList() NavigableListExpr {
return navigableListImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsMap() NavigableMapExpr {
return navigableMapImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsSelect() NavigableSelectExpr {
return navigableSelectImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) AsStruct() NavigableStructExpr {
return navigableStructImpl{navigableExprImpl: nav}
}
func (nav *navigableExprImpl) createChild(e *exprpb.Expr) NavigableExpr {
return newNavigableExpr(nav, e, nav.typeMap)
}
func (nav *navigableExprImpl) isNavigable() {}
type navigableCallImpl struct {
*navigableExprImpl
}
func (call navigableCallImpl) FunctionName() string {
return call.ToExpr().GetCallExpr().GetFunction()
}
func (call navigableCallImpl) Target() NavigableExpr {
t := call.ToExpr().GetCallExpr().GetTarget()
if t != nil {
return call.createChild(t)
}
return nil
}
func (call navigableCallImpl) Args() []NavigableExpr {
args := call.ToExpr().GetCallExpr().GetArgs()
navArgs := make([]NavigableExpr, len(args))
for i, a := range args {
navArgs[i] = call.createChild(a)
}
return navArgs
}
func (call navigableCallImpl) ReturnType() *types.Type {
return call.Type()
}
type navigableComprehensionImpl struct {
*navigableExprImpl
}
func (comp navigableComprehensionImpl) IterRange() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetIterRange())
}
func (comp navigableComprehensionImpl) IterVar() string {
return comp.ToExpr().GetComprehensionExpr().GetIterVar()
}
func (comp navigableComprehensionImpl) AccuVar() string {
return comp.ToExpr().GetComprehensionExpr().GetAccuVar()
}
func (comp navigableComprehensionImpl) AccuInit() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetAccuInit())
}
func (comp navigableComprehensionImpl) LoopCondition() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopCondition())
}
func (comp navigableComprehensionImpl) LoopStep() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopStep())
}
func (comp navigableComprehensionImpl) Result() NavigableExpr {
return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetResult())
}
type navigableListImpl struct {
*navigableExprImpl
}
func (l navigableListImpl) Elements() []NavigableExpr {
return l.Children()
}
func (l navigableListImpl) OptionalIndices() []int32 {
return l.ToExpr().GetListExpr().GetOptionalIndices()
}
func (l navigableListImpl) Size() int {
return len(l.ToExpr().GetListExpr().GetElements())
}
type navigableMapImpl struct {
*navigableExprImpl
}
func (m navigableMapImpl) Entries() []NavigableEntry {
mapExpr := m.ToExpr().GetStructExpr()
entries := make([]NavigableEntry, len(mapExpr.GetEntries()))
for i, e := range mapExpr.GetEntries() {
entries[i] = navigableEntryImpl{
key: m.createChild(e.GetMapKey()),
val: m.createChild(e.GetValue()),
isOpt: e.GetOptionalEntry(),
}
}
return entries
}
func (m navigableMapImpl) Size() int {
return len(m.ToExpr().GetStructExpr().GetEntries())
}
type navigableEntryImpl struct {
key NavigableExpr
val NavigableExpr
isOpt bool
}
func (e navigableEntryImpl) Key() NavigableExpr {
return e.key
}
func (e navigableEntryImpl) Value() NavigableExpr {
return e.val
}
func (e navigableEntryImpl) IsOptional() bool {
return e.isOpt
}
func (e navigableEntryImpl) isNavigable() {}
type navigableSelectImpl struct {
*navigableExprImpl
}
func (sel navigableSelectImpl) FieldName() string {
return sel.ToExpr().GetSelectExpr().GetField()
}
func (sel navigableSelectImpl) IsTestOnly() bool {
return sel.ToExpr().GetSelectExpr().GetTestOnly()
}
func (sel navigableSelectImpl) Operand() NavigableExpr {
return sel.createChild(sel.ToExpr().GetSelectExpr().GetOperand())
}
type navigableStructImpl struct {
*navigableExprImpl
}
func (s navigableStructImpl) TypeName() string {
return s.ToExpr().GetStructExpr().GetMessageName()
}
func (s navigableStructImpl) Fields() []NavigableField {
fieldInits := s.ToExpr().GetStructExpr().GetEntries()
fields := make([]NavigableField, len(fieldInits))
for i, f := range fieldInits {
fields[i] = navigableFieldImpl{
name: f.GetFieldKey(),
val: s.createChild(f.GetValue()),
isOpt: f.GetOptionalEntry(),
}
}
return fields
}
type navigableFieldImpl struct {
name string
val NavigableExpr
isOpt bool
}
func (f navigableFieldImpl) FieldName() string {
return f.name
}
func (f navigableFieldImpl) Value() NavigableExpr {
return f.val
}
func (f navigableFieldImpl) IsOptional() bool {
return f.isOpt
}
func (f navigableFieldImpl) isNavigable() {}
func kindOf(expr *exprpb.Expr) (ExprKind, childFactory) {
switch expr.GetExprKind().(type) {
case *exprpb.Expr_ConstExpr:
return LiteralKind, noopFactory
case *exprpb.Expr_IdentExpr:
return IdentKind, noopFactory
case *exprpb.Expr_SelectExpr:
return SelectKind, selectFactory
case *exprpb.Expr_CallExpr:
return CallKind, callArgFactory
case *exprpb.Expr_ListExpr:
return ListKind, listElemFactory
case *exprpb.Expr_StructExpr:
if expr.GetStructExpr().GetMessageName() != "" {
return StructKind, structEntryFactory
}
return MapKind, mapEntryFactory
case *exprpb.Expr_ComprehensionExpr:
return ComprehensionKind, comprehensionFactory
default:
return UnspecifiedKind, noopFactory
}
}
type childFactory func(*navigableExprImpl) []NavigableExpr
func noopFactory(*navigableExprImpl) []NavigableExpr {
return nil
}
func selectFactory(nav *navigableExprImpl) []NavigableExpr {
return []NavigableExpr{
nav.createChild(nav.ToExpr().GetSelectExpr().GetOperand()),
}
}
func callArgFactory(nav *navigableExprImpl) []NavigableExpr {
call := nav.ToExpr().GetCallExpr()
argCount := len(call.GetArgs())
if call.GetTarget() != nil {
argCount++
}
navExprs := make([]NavigableExpr, argCount)
i := 0
if call.GetTarget() != nil {
navExprs[i] = nav.createChild(call.GetTarget())
i++
}
for _, arg := range call.GetArgs() {
navExprs[i] = nav.createChild(arg)
i++
}
return navExprs
}
func listElemFactory(nav *navigableExprImpl) []NavigableExpr {
l := nav.ToExpr().GetListExpr()
navExprs := make([]NavigableExpr, len(l.GetElements()))
for i, e := range l.GetElements() {
navExprs[i] = nav.createChild(e)
}
return navExprs
}
func structEntryFactory(nav *navigableExprImpl) []NavigableExpr {
s := nav.ToExpr().GetStructExpr()
entries := make([]NavigableExpr, len(s.GetEntries()))
for i, e := range s.GetEntries() {
entries[i] = nav.createChild(e.GetValue())
}
return entries
}
func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr {
s := nav.ToExpr().GetStructExpr()
entries := make([]NavigableExpr, len(s.GetEntries())*2)
j := 0
for _, e := range s.GetEntries() {
entries[j] = nav.createChild(e.GetMapKey())
entries[j+1] = nav.createChild(e.GetValue())
j += 2
}
return entries
}
func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr {
compre := nav.ToExpr().GetComprehensionExpr()
return []NavigableExpr{
nav.createChild(compre.GetIterRange()),
nav.createChild(compre.GetAccuInit()),
nav.createChild(compre.GetLoopCondition()),
nav.createChild(compre.GetLoopStep()),
nav.createChild(compre.GetResult()),
}
}

39
vendor/github.com/google/cel-go/common/decls/BUILD.bazel generated vendored Normal file
View File

@@ -0,0 +1,39 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
package(
default_visibility = ["//visibility:public"],
licenses = ["notice"], # Apache 2.0
)
go_library(
name = "go_default_library",
srcs = [
"decls.go",
],
importpath = "github.com/google/cel-go/common/decls",
deps = [
"//checker/decls:go_default_library",
"//common/functions:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"decls_test.go",
],
embed = [":go_default_library"],
deps = [
"//checker/decls:go_default_library",
"//common/overloads:go_default_library",
"//common/types:go_default_library",
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

Some files were not shown because too many files have changed in this diff.